From 1c577f4d5744fa49bc0b784e066860ae82cfb4de Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 19 Apr 2025 20:33:25 +0200 Subject: [PATCH 001/294] Fix NPY_FINLINE macro for C++ class methods Remove 'static' qualifier in C++ mode to allow NPY_FINLINE usage within class methods. --- numpy/_core/include/numpy/npy_common.h | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 79ad8ad78cb2..96b6ce67bf64 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -98,11 +98,23 @@ #endif #ifdef _MSC_VER - #define NPY_FINLINE static __forceinline + #ifdef __cplusplus + #define NPY_FINLINE __forceinline + #else + #define NPY_FINLINE static __forceinline + #endif #elif defined(__GNUC__) - #define NPY_FINLINE static inline __attribute__((always_inline)) + #ifdef __cplusplus + #define NPY_FINLINE inline __attribute__((always_inline)) + #else + #define NPY_FINLINE static inline __attribute__((always_inline)) + #endif #else - #define NPY_FINLINE static + #ifdef __cplusplus + #define NPY_FINLINE inline + #else + #define NPY_FINLINE static NPY_INLINE + #endif #endif #if defined(_MSC_VER) From c952b9a82a120934793efc4e30ffca3858d04397 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 24 Nov 2023 11:01:14 +0200 Subject: [PATCH 002/294] ENH: Enable native half-precision scalar conversion operations on ARM --- numpy/_core/src/common/half.hpp | 81 ++++++--------------------- numpy/_core/src/npymath/halffloat.cpp | 28 ++------- 2 files changed, 20 insertions(+), 89 deletions(-) diff --git a/numpy/_core/src/common/half.hpp b/numpy/_core/src/common/half.hpp index 484750ad84cd..14dabbe79d7f 100644 --- a/numpy/_core/src/common/half.hpp +++ b/numpy/_core/src/common/half.hpp @@ -9,8 +9,6 @@ // TODO(@seiko2plus): // - covers half-precision operations that being supported by numpy/halffloat.h // - add support for arithmetic 
operations -// - enables __fp16 causes massive FP exceptions on aarch64, -// needs a deep investigation namespace np { @@ -19,42 +17,19 @@ namespace np { /// Provides a type that implements 16-bit floating point (half-precision). /// This type is ensured to be 16-bit size. -#if 1 // ndef __ARM_FP16_FORMAT_IEEE class Half final { public: - /// Whether `Half` has a full native HW support. - static constexpr bool kNative = false; - /// Whether `Half` has a native HW support for single/double conversion. - template - static constexpr bool kNativeConversion = ( - ( - std::is_same_v && - #if defined(NPY_HAVE_FP16) || defined(NPY_HAVE_VSX3) - true - #else - false - #endif - ) || ( - std::is_same_v && - #if defined(NPY_HAVE_AVX512FP16) || (defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE)) - true - #else - false - #endif - ) - ); - /// Default constructor. initialize nothing. Half() = default; /// Construct from float /// If there are no hardware optimization available, rounding will always /// be set to ties to even. - explicit Half(float f) + NPY_FINLINE explicit Half(float f) { #if defined(NPY_HAVE_FP16) __m128 mf = _mm_load_ss(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT))); + bits_ = _mm_extract_epi16(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX_ASM) __vector float vf32 = vec_splats(f); __vector unsigned short vf16; @@ -64,6 +39,9 @@ class Half final { #else bits_ = vec_extract(vf16, 0); #endif + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromFloatBits(BitCast(f)); #endif @@ -72,20 +50,23 @@ class Half final { /// Construct from double. /// If there are no hardware optimization available, rounding will always /// be set to ties to even. 
- explicit Half(double f) + NPY_FINLINE explicit Half(double f) { #if defined(NPY_HAVE_AVX512FP16) __m128d md = _mm_load_sd(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_castph_si128(_mm_cvtpd_ph(md)))); + bits_ = _mm_extract_epi16(_mm_castph_si128(_mm_cvtpd_ph(md)), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits_) : "wa" (f)); + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromDoubleBits(BitCast(f)); #endif } /// Cast to float - explicit operator float() const + NPY_FINLINE explicit operator float() const { #if defined(NPY_HAVE_FP16) float ret; @@ -99,13 +80,15 @@ class Half final { : "=wa"(vf32) : "wa"(vec_splats(bits_))); return vec_extract(vf32, 0); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return float(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToFloatBits(bits_)); #endif } /// Cast to double - explicit operator double() const + NPY_FINLINE explicit operator double() const { #if defined(NPY_HAVE_AVX512FP16) double ret; @@ -117,6 +100,8 @@ class Half final { : "=wa"(f64) : "wa"(bits_)); return f64; + #elif defined(__ARM_FP16_FORMAT_IEEE) + return double(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToDoubleBits(bits_)); #endif @@ -223,40 +208,6 @@ class Half final { private: uint16_t bits_; }; -#else // __ARM_FP16_FORMAT_IEEE -class Half final { - public: - static constexpr bool kNative = true; - template - static constexpr bool kNativeConversion = ( - std::is_same_v || std::is_same_v - ); - Half() = default; - constexpr Half(__fp16 h) : half_(h) - {} - constexpr operator __fp16() const - { return half_; } - static Half FromBits(uint16_t bits) - { - Half h; - h.half_ = BitCast<__fp16>(bits); - return h; - } - uint16_t Bits() const - { return BitCast(half_); } - constexpr bool Less(Half r) const - { return half_ < r.half_; } - constexpr bool LessEqual(Half r) const - { return half_ 
<= r.half_; } - constexpr bool Equal(Half r) const - { return half_ == r.half_; } - constexpr bool IsNaN() const - { return half_ != half_; } - - private: - __fp16 half_; -}; -#endif // __ARM_FP16_FORMAT_IEEE /// @} cpp_core_types diff --git a/numpy/_core/src/npymath/halffloat.cpp b/numpy/_core/src/npymath/halffloat.cpp index aa582c1b9517..9289a659f5f5 100644 --- a/numpy/_core/src/npymath/halffloat.cpp +++ b/numpy/_core/src/npymath/halffloat.cpp @@ -198,41 +198,21 @@ npy_half npy_half_divmod(npy_half h1, npy_half h2, npy_half *modulus) npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(f))); - } - else { - return half_private::FromFloatBits(f); - } + return BitCast(Half(BitCast(f))); } npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(d))); - } - else { - return half_private::FromDoubleBits(d); - } + return BitCast(Half(BitCast(d))); } npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToFloatBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToDoubleBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } From db5535a0a565b6847a9054b40c8941dd46ec75de Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 16 May 2025 12:39:17 +0200 Subject: [PATCH 003/294] MNT: constant string arrays instead of pointers in C --- numpy/_core/src/common/npy_cpu_features.c | 2 +- numpy/_core/src/multiarray/compiled_base.c | 6 +++--- numpy/_core/src/multiarray/stringdtype/casts.cpp | 16 ++++++++-------- 
.../src/multiarray/stringdtype/static_string.c | 2 +- numpy/f2py/rules.py | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 8810182812e5..e1fd8404d3bf 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -277,7 +277,7 @@ npy__cpu_check_env(int disable, const char *env) { char *notsupp_cur = ¬supp[0]; //comma and space including (htab, vtab, CR, LF, FF) - const char *delim = ", \t\v\r\n\f"; + const char delim[] = ", \t\v\r\n\f"; char *feature = strtok(features, delim); while (feature) { if (npy__cpu_baseline_fid(feature) > 0){ diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 26b898fa1479..86b60cf75944 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -920,11 +920,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t return NULL; } -static const char *EMPTY_SEQUENCE_ERR_MSG = "indices must be integral: the provided " \ +static const char EMPTY_SEQUENCE_ERR_MSG[] = "indices must be integral: the provided " \ "empty sequence was inferred as float. 
Wrap it with " \ "'np.array(indices, dtype=np.intp)'"; -static const char *NON_INTEGRAL_ERROR_MSG = "only int indices permitted"; +static const char NON_INTEGRAL_ERROR_MSG[] = "only int indices permitted"; /* Convert obj to an ndarray with integer dtype or fail */ static PyArrayObject * @@ -1465,7 +1465,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyObject *obj; PyObject *str; const char *docstr; - static char *msg = "already has a different docstring"; + static const char msg[] = "already has a different docstring"; /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index f66727501f97..c69818173e24 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -605,7 +605,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul const npy_packed_static_string *ps = (npy_packed_static_string *)in; int isnull = NpyString_load(allocator, ps, string_to_load); if (isnull == -1) { - const char *msg = "Failed to load string for conversion to a non-nullable type"; + const char msg[] = "Failed to load string for conversion to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_MemoryError, msg); @@ -617,7 +617,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul } else if (isnull) { if (has_null) { - const char *msg = "Arrays with missing data cannot be converted to a non-nullable type"; + const char msg[] = "Arrays with missing data cannot be converted to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_ValueError, msg); @@ -821,8 +821,8 @@ static PyType_Slot s2int_slots[] = { static const char * make_s2type_name(NPY_TYPES typenum) { - const char *prefix = "cast_StringDType_to_"; - size_t plen = strlen(prefix); + const char prefix[] = "cast_StringDType_to_"; + size_t plen = 
sizeof(prefix)/sizeof(char) - 1; const char *type_name = typenum_to_cstr(typenum); size_t nlen = strlen(type_name); @@ -842,14 +842,14 @@ make_s2type_name(NPY_TYPES typenum) { static const char * make_type2s_name(NPY_TYPES typenum) { - const char *prefix = "cast_"; - size_t plen = strlen(prefix); + const char prefix[] = "cast_"; + size_t plen = sizeof(prefix)/sizeof(char) - 1; const char *type_name = typenum_to_cstr(typenum); size_t nlen = strlen(type_name); - const char *suffix = "_to_StringDType"; - size_t slen = strlen(suffix); + const char suffix[] = "_to_StringDType"; + size_t slen = sizeof(suffix)/sizeof(char) - 1; char *buf = (char *)PyMem_RawCalloc(sizeof(char), plen + nlen + slen + 1); diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 02ab7d246a7a..1c29bbb67f7e 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -404,7 +404,7 @@ NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) } } -static const char * const EMPTY_STRING = ""; +static const char EMPTY_STRING[] = ""; /*NUMPY_API * Extract the packed contents of *packed_string* into *unpacked_string*.
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 4122f0a49f17..c10d2afdd097 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1154,7 +1154,7 @@ 'frompyobj': [ ' #setdims#;', ' capi_#varname#_intent |= #intent#;', - (' const char * capi_errmess = "#modulename#.#pyname#:' + (' const char capi_errmess[] = "#modulename#.#pyname#:' ' failed to create array from the #nth# `#varname#`";'), {isintent_hide: ' capi_#varname#_as_array = ndarray_from_pyobj(' From a5401fd0599aa7cb7044c6983d0d7df17d4ed06f Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 16 May 2025 12:51:22 +0200 Subject: [PATCH 004/294] MNT: alternative handling of trailing null character Trying to avoid this compiler warning: ../numpy/_core/src/multiarray/stringdtype/casts.cpp:860:12: warning: 'char* strncat(char*, const char*, size_t)' specified bound 15 equals source length [-Wstringop-overflow=] 860 | strncat(buf, suffix, slen); | ~~~~~~~^~~~~~~~~~~~~~~~~~~ --- .../src/multiarray/stringdtype/casts.cpp | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index c69818173e24..3632e359c9a9 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -833,10 +833,12 @@ make_s2type_name(NPY_TYPES typenum) { return NULL; } - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); return buf; } @@ -853,11 +855,14 @@ make_type2s_name(NPY_TYPES typenum) { char *buf = (char 
*)PyMem_RawCalloc(sizeof(char), plen + nlen + slen + 1); - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); - strncat(buf, suffix, slen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); + p += nlen; + memcpy(p, suffix, slen); return buf; } From 22132f1522f8ac3d278a3773fe071b699df3fc31 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 16 May 2025 13:23:05 +0200 Subject: [PATCH 005/294] MNT: more const'ness for arrays of string literals in C --- numpy/_core/src/common/npy_cpu_features.c | 2 +- numpy/_core/src/umath/string_ufuncs.cpp | 12 ++++++------ numpy/_core/src/umath/stringdtype_ufuncs.cpp | 10 +++++----- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index e1fd8404d3bf..f15f636cdb1e 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -246,7 +246,7 @@ npy__cpu_validate_baseline(void) static int npy__cpu_check_env(int disable, const char *env) { - static const char *names[] = { + static const char *const names[] = { "enable", "disable", "NPY_ENABLE_CPU_FEATURES", "NPY_DISABLE_CPU_FEATURES", "During parsing environment variable: 'NPY_ENABLE_CPU_FEATURES':\n", diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 5b4b67cda625..ffd757815364 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -1521,7 +1521,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = NPY_OBJECT; dtypes[1] = NPY_BOOL; - const char *unary_buffer_method_names[] = { + 
const char *const unary_buffer_method_names[] = { "isalpha", "isalnum", "isdigit", "isspace", "islower", "isupper", "istitle", "isdecimal", "isnumeric", }; @@ -1635,7 +1635,7 @@ init_string_ufuncs(PyObject *umath) dtypes[2] = dtypes[3] = NPY_INT64; dtypes[4] = NPY_BOOL; - const char *startswith_endswith_names[] = { + const char *const startswith_endswith_names[] = { "startswith", "endswith" }; @@ -1664,7 +1664,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = NPY_OBJECT; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace" }; @@ -1691,7 +1691,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[2] = NPY_OBJECT; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars" }; @@ -1750,7 +1750,7 @@ init_string_ufuncs(PyObject *umath) dtypes[1] = NPY_INT64; - const char *center_ljust_rjust_names[] = { + const char *const center_ljust_rjust_names[] = { "_center", "_ljust", "_rjust" }; @@ -1827,7 +1827,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[3] = dtypes[4] = dtypes[5] = NPY_OBJECT; dtypes[2] = NPY_INT64; - const char *partition_names[] = {"_partition_index", "_rpartition_index"}; + const char *const partition_names[] = {"_partition_index", "_rpartition_index"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 37ae0a39a349..adcbfd3b7480 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2605,7 +2605,7 @@ add_object_and_unicode_promoters(PyObject *umath, const char* ufunc_name, NPY_NO_EXPORT int init_stringdtype_ufuncs(PyObject *umath) { - static const char *comparison_ufunc_names[6] = { + static const char *const 
comparison_ufunc_names[6] = { "equal", "not_equal", "less", "less_equal", "greater_equal", "greater", }; @@ -2654,7 +2654,7 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - const char *unary_loop_names[] = { + const char *const unary_loop_names[] = { "isalpha", "isdecimal", "isdigit", "isnumeric", "isspace", "isalnum", "istitle", "isupper", "islower", }; @@ -2874,7 +2874,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace", }; @@ -2898,7 +2898,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars", }; @@ -3082,7 +3082,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType }; - const char *partition_names[] = {"_partition", "_rpartition"}; + const char *const partition_names[] = {"_partition", "_rpartition"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK From 92f2622e376e05620fd4e63fb453bc5d1e5b160a Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 1 Apr 2025 22:58:10 +0200 Subject: [PATCH 006/294] ENH, SIMD: Initial implementation of Highway wrapper A thin wrapper over Google's Highway SIMD library to simplify its interface. This commit provides the implementation of that wrapper, consisting of: - simd.hpp: Main header defining the SIMD namespaces and configuration - simd.inc.hpp: Template header included multiple times with different namespaces The wrapper eliminates Highway's class tags by: - Using lane types directly which can be deduced from arguments - Leveraging namespaces (np::simd and np::simd128) for different register widths A README is included to guide usage and document design decisions. 
--- numpy/_core/src/common/simd/README.md | 258 +++++++++++++++++++++++ numpy/_core/src/common/simd/simd.hpp | 80 +++++++ numpy/_core/src/common/simd/simd.inc.hpp | 102 +++++++++ 3 files changed, 440 insertions(+) create mode 100644 numpy/_core/src/common/simd/README.md create mode 100644 numpy/_core/src/common/simd/simd.hpp create mode 100644 numpy/_core/src/common/simd/simd.inc.hpp diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md new file mode 100644 index 000000000000..ac58c4a6bd94 --- /dev/null +++ b/numpy/_core/src/common/simd/README.md @@ -0,0 +1,258 @@ +# NumPy SIMD Wrapper for Highway + +This directory contains a lightweight C++ wrapper over Google's [Highway](https://github.com/google/highway) SIMD library, designed specifically for NumPy's needs. + +> **Note**: This directory also contains the C interface of universal intrinsics (under `simd.h`) which is no longer supported. The Highway wrapper described in this document should be used instead for all new SIMD code. + +## Overview + +The wrapper simplifies Highway's SIMD interface by eliminating class tags and using lane types directly, which can be deduced from arguments in most cases. This design makes the SIMD code more intuitive and easier to maintain while still leveraging Highway generic intrinsics. + +## Architecture + +The wrapper consists of two main headers: + +1. `simd.hpp`: The main header that defines namespaces and includes configuration macros +2. `simd.inc.hpp`: Implementation details included by `simd.hpp` multiple times for different namespaces + +Additionally, this directory contains legacy C interface files for universal intrinsics (`simd.h` and related files) which are deprecated and should not be used for new code. All new SIMD code should use the Highway wrapper. + + +## Usage + +### Basic Usage + +```cpp +#include "simd/simd.hpp" + +// Use np::simd for maximum width SIMD operations +using namespace np::simd; +float *data = /* ... 
*/; +Vec v = LoadU(data); +v = Add(v, v); +StoreU(v, data); + +// Use np::simd128 for fixed 128-bit SIMD operations +using namespace np::simd128; +Vec v128 = LoadU(data); +v128 = Add(v128, v128); +StoreU(v128, data); +``` + +### Checking for SIMD Support + +```cpp +#include "simd/simd.hpp" + +// Check if SIMD is enabled +#if NPY_SIMDX + // SIMD code +#else + // Scalar fallback code +#endif + +// Check for float64 support +#if NPY_SIMDX_F64 + // Use float64 SIMD operations +#endif + +// Check for FMA support +#if NPY_SIMDX_FMA + // Use FMA operations +#endif +``` + +## Type Support and Constraints + +The wrapper provides type constraints to help with SFINAE (Substitution Failure Is Not An Error) and compile-time type checking: + +- `kSupportLane`: Determines whether the specified lane type is supported by the SIMD extension. + ```cpp + // Base template - always defined, even when SIMD is not enabled (for SFINAE) + template + constexpr bool kSupportLane = NPY_SIMDX != 0; + template <> + constexpr bool kSupportLane = NPY_SIMDX_F64 != 0; + ``` + + +```cpp +#include "simd/simd.hpp" + +// Check if float64 operations are supported +if constexpr (np::simd::kSupportLane) { + // Use float64 operations +} +``` + +These constraints allow for compile-time checking of which lane types are supported, which can be used in SFINAE contexts to enable or disable functions based on type support. 
+ +## Available Operations + +The wrapper provides the following common operations that are used in NumPy: + +- Vector creation operations: + - `Zero`: Returns a vector with all lanes set to zero + - `Set`: Returns a vector with all lanes set to the given value + - `Undefined`: Returns an uninitialized vector + +- Memory operations: + - `LoadU`: Unaligned load of a vector from memory + - `StoreU`: Unaligned store of a vector to memory + +- Vector information: + - `Lanes`: Returns the number of vector lanes based on the lane type + +- Type conversion: + - `BitCast`: Reinterprets a vector to a different type without modifying the underlying data + - `VecFromMask`: Converts a mask to a vector + +- Comparison operations: + - `Eq`: Element-wise equality comparison + - `Le`: Element-wise less than or equal comparison + - `Lt`: Element-wise less than comparison + - `Gt`: Element-wise greater than comparison + - `Ge`: Element-wise greater than or equal comparison + +- Arithmetic operations: + - `Add`: Element-wise addition + - `Sub`: Element-wise subtraction + - `Mul`: Element-wise multiplication + - `Div`: Element-wise division + - `Min`: Element-wise minimum + - `Max`: Element-wise maximum + - `Abs`: Element-wise absolute value + - `Sqrt`: Element-wise square root + +- Logical operations: + - `And`: Bitwise AND + - `Or`: Bitwise OR + - `Xor`: Bitwise XOR + - `AndNot`: Bitwise AND NOT (a & ~b) + +Additional Highway operations can be accessed via the `hn` namespace alias inside the `simd` or `simd128` namespaces. + +## Extending + +To add more operations from Highway: + +1. Import them in the `simd.inc.hpp` file using the `using` directive if they don't require a tag: + ```cpp + // For operations that don't require a tag + using hn::FunctionName; + ``` + +2. Define wrapper functions for intrinsics that require a class tag: + ```cpp + // For operations that require a tag + template + HWY_API ReturnType FunctionName(Args... 
args) { + return hn::FunctionName(_Tag(), args...); + } + ``` + +3. Add appropriate documentation and SFINAE constraints if needed + + +## Build Configuration + +The SIMD wrapper automatically disables SIMD operations when optimizations are disabled: + +- When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled +- SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`) + +## Design Notes + +1. **Why avoid Highway scalar operations?** + - NumPy already provides kernels for scalar operations + - Compilers can better optimize standard library implementations + - Not all Highway intrinsics are fully supported in scalar mode + +2. **Legacy Universal Intrinsics** + - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated + - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_SIMDX` macros) + - The legacy code is maintained for compatibility but will eventually be removed + +3. **Feature Detection Constants vs. Highway Constants** + - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants + - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check hardware capabilities but don't consider NumPy's build configuration + - Our constants combine both checks: + ```cpp + #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) + ``` + - This ensures SIMD features won't be used when: + - Hardware supports it but NumPy optimization is disabled via meson option: + ``` + option('disable-optimization', type: 'boolean', value: false, + description: 'Disable CPU optimized code (dispatch,simd,unroll...)') + ``` + - Highway target is scalar (`HWY_TARGET == HWY_SCALAR`) + - Using these constants ensures consistent behavior across different compilation settings + - Without this additional layer, code might incorrectly try to use SIMD paths in scalar mode + +4. 
**Namespace Design** + - `np::simd`: Maximum width SIMD operations (scalable) + - `np::simd128`: Fixed 128-bit SIMD operations + - `hn`: Highway namespace alias (available within the SIMD namespaces) + +5. **Why Namespaces and Why Not Just Use Highway Directly?** + - Highway's design uses class tag types as template parameters (e.g., `Vec>`) when defining vector types + - Many Highway functions require explicitly passing a tag instance as the first parameter + - This class tag-based approach increases verbosity and complexity in user code + - Our wrapper eliminates this by internally managing tags through namespaces, letting users directly use types e.g. `Vec` + - Simple example with raw Highway: + ```cpp + // Highway's approach + float *data = /* ... */; + + namespace hn = hwy::HWY_NAMESPACE; + using namespace hn; + + // Full-width operations + ScalableTag df; // Create a tag instance + Vec v = LoadU(df, data); // LoadU requires a tag instance + StoreU(v, df, data); // StoreU requires a tag instance + + // 128-bit operations + Full128 df128; // Create a 128-bit tag instance + Vec v128 = LoadU(df128, data); // LoadU requires a tag instance + StoreU(v128, df128, data); // StoreU requires a tag instance + ``` + + - Simple example with our wrapper: + ```cpp + // Our wrapper approach + float *data = /* ... */; + + // Full-width operations + using namespace np::simd; + Vec v = LoadU(data); // Full-width vector load + StoreU(v, data); + + // 128-bit operations + using namespace np::simd128; + Vec v128 = LoadU(data); // 128-bit vector load + StoreU(v128, data); + ``` + + - The namespaced approach simplifies code, reduces errors, and provides a more intuitive interface + - It preserves all Highway operations benefits while reducing cognitive overhead + +5. 
**Why Namespaces Are Essential for This Design?** + - Namespaces allow us to define different internal tag types (`hn::ScalableTag` in `np::simd` vs `hn::Full128` in `np::simd128`) + - This provides a consistent type-based interface (`Vec`) without requiring users to manually create tags + - Enables using the same function names (like `LoadU`) with different implementations based on SIMD width + - Without namespaces, we'd have to either reintroduce tags (defeating the purpose of the wrapper) or create different function names for each variant (e.g., `LoadU` vs `LoadU128`) + +6. **Template Type Parameters** + - `TLane`: The scalar type for each vector lane (e.g., uint8_t, float, double) + + +## Requirements + +- C++17 or later +- Google Highway library + +## License + +Same as NumPy's license diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp new file mode 100644 index 000000000000..698da4adf865 --- /dev/null +++ b/numpy/_core/src/common/simd/simd.hpp @@ -0,0 +1,80 @@ +#ifndef NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ +#define NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ + +/** + * This header provides a thin wrapper over Google's Highway SIMD library. + * + * The wrapper aims to simplify the SIMD interface of Google's Highway by + * get ride of its class tags and use lane types directly which can be deduced + * from the args in most cases. + */ +/** + * Since `NPY_SIMD` is only limited to NumPy C universal intrinsics, + * `NPY_SIMDX` is defined to indicate the SIMD availability for Google's Highway + * C++ code. + * + * Highway SIMD is only available when optimization is enabled. + * When NPY_DISABLE_OPTIMIZATION is defined, SIMD operations are disabled + * and the code falls back to scalar implementations. + */ +#ifndef NPY_DISABLE_OPTIMIZATION +#include + +/** + * We avoid using Highway scalar operations for the following reasons: + * 1. 
We already provide kernels for scalar operations, so falling back to + * the NumPy implementation is more appropriate. Compilers can often + * optimize these better since they rely on standard libraries. + * 2. Not all Highway intrinsics are fully supported in scalar mode. + * + * Therefore, we only enable SIMD when the Highway target is not scalar. + */ +#define NPY_SIMDX (HWY_TARGET != HWY_SCALAR) + +// Indicates if the SIMD operations are available for float16. +#define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) +// Note: Highway requires SIMD extentions with native float32 support, so we don't need +// to check for it. + +// Indicates if the SIMD operations are available for float64. +#define NPY_SIMDX_F64 (NPY_SIMDX && HWY_HAVE_FLOAT64) + +// Indicates if the SIMD floating operations are natively supports fma. +#define NPY_SIMDX_FMA (NPY_SIMDX && HWY_NATIVE_FMA) + +#else +#define NPY_SIMDX 0 +#define NPY_SIMDX_F16 0 +#define NPY_SIMDX_F64 0 +#define NPY_SIMDX_FMA 0 +#endif + +namespace np { + +/// Represents the max SIMD width supported by the platform. +namespace simd { +#if NPY_SIMDX +/// The highway namespace alias. +/// We can not import all the symbols from the HWY_NAMESPACE because it will +/// conflict with the existing symbols in the numpy namespace. +namespace hn = hwy::HWY_NAMESPACE; +// internaly used by the template header +template +using _Tag = hn::ScalableTag; +#endif +#include "simd.inc.hpp" +} // namespace simd + +/// Represents the 128-bit SIMD width. 
+namespace simd128 { +#if NPY_SIMDX +namespace hn = hwy::HWY_NAMESPACE; +template +using _Tag = hn::Full128; +#endif +#include "simd.inc.hpp" +} // namespace simd128 + +} // namespace np + +#endif // NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp new file mode 100644 index 000000000000..d052d829720a --- /dev/null +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ -0,0 +1,102 @@ +#ifndef NPY_SIMDX +#error "This is not a standalone header. Include simd.hpp instead." +#endif + +// NOTE: This file is included by simd.hpp multiple times with different namespaces +// so avoid including any headers here +// #define NPY_SIMDX 1 // uncomment to enable Highlighting + +/** + * Determines whether the specified lane type is supported by the SIMD extension. + * Always defined as false when SIMD is not enabled, so it can be used in SFINAE. + * + * @tparam TLane The lane type to check for support. + */ +template +constexpr bool kSupportLane = NPY_SIMDX != 0; + +#if NPY_SIMDX +// Define lane type support based on Highway capabilities +template <> +constexpr bool kSupportLane = HWY_HAVE_FLOAT16 != 0; +template <> +constexpr bool kSupportLane = HWY_HAVE_FLOAT64 != 0; + +/// Represents an N-lane vector based on the specified lane type. +/// @tparam TLane The scalar type for each vector lane +template +using Vec = hn::Vec<_Tag>; + +/// Represents a mask vector with boolean values or as a bitmask. +/// @tparam TLane The scalar type the mask corresponds to +template +using Mask = hn::Mask<_Tag>; + +/// Unaligned load of a vector from memory. +template +HWY_API Vec LoadU(const TLane* ptr) { + return hn::LoadU(_Tag(), ptr); +} + +/// Unaligned store of a vector to memory. +template +HWY_API void StoreU(const Vec& a, TLane* ptr) { + hn::StoreU(a, _Tag(), ptr); +} + +/// Returns the number of vector lanes based on the lane type. 
+template +HWY_API constexpr size_t Lanes(TLane tag = 0) { + return hn::Lanes(_Tag()); +} + +/// Returns an uninitialized N-lane vector. +template +HWY_API Vec Undefined(TLane tag = 0) { + return hn::Undefined(_Tag()); +} + +/// Returns N-lane vector with all lanes equal to zero. +template +HWY_API Vec Zero(TLane tag = 0) { + return hn::Zero(_Tag()); +} + +/// Returns N-lane vector with all lanes equal to the given value of type `TLane`. +template +HWY_API Vec Set(TLane val) { + return hn::Set(_Tag(), val); +} + +/// Converts a mask to a vector based on the specified lane type. +template +HWY_API Vec VecFromMask(const TMask &m) { + return hn::VecFromMask(_Tag(), m); +} + +/// Convert (Reinterpret) an N-lane vector to a different type without modifying the underlying data. +template +HWY_API Vec BitCast(const TVec &v) { + return hn::BitCast(_Tag(), v); +} + +// Import common Highway intrinsics +using hn::Eq; +using hn::Le; +using hn::Lt; +using hn::Gt; +using hn::Ge; +using hn::And; +using hn::Or; +using hn::Xor; +using hn::AndNot; +using hn::Sub; +using hn::Add; +using hn::Mul; +using hn::Div; +using hn::Min; +using hn::Max; +using hn::Abs; +using hn::Sqrt; + +#endif // NPY_SIMDX From 5d48ec32b7e4f17e9c5934f1e029c25f683dc840 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 12 Apr 2025 19:25:27 +0200 Subject: [PATCH 007/294] SIMD: Update wrapper with improved docs and type support - Fix hardware/platform terminology in documentation for clarity - Add support for long double in template specializations - Add kMaxLanes constant to expose maximum vector width information - Follows clang formatting style for consistency with NumPy codebase. 
--- numpy/_core/src/common/simd/README.md | 9 ++- numpy/_core/src/common/simd/simd.inc.hpp | 70 ++++++++++++++++-------- 2 files changed, 54 insertions(+), 25 deletions(-) diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md index ac58c4a6bd94..9a68d1aa1bfc 100644 --- a/numpy/_core/src/common/simd/README.md +++ b/numpy/_core/src/common/simd/README.md @@ -75,6 +75,11 @@ The wrapper provides type constraints to help with SFINAE (Substitution Failure constexpr bool kSupportLane = NPY_SIMDX_F64 != 0; ``` +- `kMaxLanes`: Maximum number of lanes supported by the SIMD extension for the specified lane type. + ```cpp + template + constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag); + ``` ```cpp #include "simd/simd.hpp" @@ -175,13 +180,13 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d 3. **Feature Detection Constants vs. Highway Constants** - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants - - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check hardware capabilities but don't consider NumPy's build configuration + - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration - Our constants combine both checks: ```cpp #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) ``` - This ensures SIMD features won't be used when: - - Hardware supports it but NumPy optimization is disabled via meson option: + - Platform supports it but NumPy optimization is disabled via meson option: ``` option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp index d052d829720a..6d12c9a3caeb 100644 --- a/numpy/_core/src/common/simd/simd.inc.hpp +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ -1,10 +1,10 @@ 
#ifndef NPY_SIMDX #error "This is not a standalone header. Include simd.hpp instead." +#define NPY_SIMDX 1 // Prevent editors from graying out the happy branch #endif // NOTE: This file is included by simd.hpp multiple times with different namespaces // so avoid including any headers here -// #define NPY_SIMDX 1 // uncomment to enable Highlighting /** * Determines whether the specified lane type is supported by the SIMD extension. @@ -21,6 +21,13 @@ template <> constexpr bool kSupportLane = HWY_HAVE_FLOAT16 != 0; template <> constexpr bool kSupportLane = HWY_HAVE_FLOAT64 != 0; +template <> +constexpr bool kSupportLane = + HWY_HAVE_FLOAT64 != 0 && sizeof(long double) == sizeof(double); + +/// Maximum number of lanes supported by the SIMD extension for the specified lane type. +template +constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag); /// Represents an N-lane vector based on the specified lane type. /// @tparam TLane The scalar type for each vector lane @@ -34,69 +41,86 @@ using Mask = hn::Mask<_Tag>; /// Unaligned load of a vector from memory. template -HWY_API Vec LoadU(const TLane* ptr) { +HWY_API Vec +LoadU(const TLane *ptr) +{ return hn::LoadU(_Tag(), ptr); } /// Unaligned store of a vector to memory. template -HWY_API void StoreU(const Vec& a, TLane* ptr) { +HWY_API void +StoreU(const Vec &a, TLane *ptr) +{ hn::StoreU(a, _Tag(), ptr); } /// Returns the number of vector lanes based on the lane type. template -HWY_API constexpr size_t Lanes(TLane tag = 0) { +HWY_API constexpr size_t +Lanes(TLane tag = 0) +{ return hn::Lanes(_Tag()); } /// Returns an uninitialized N-lane vector. template -HWY_API Vec Undefined(TLane tag = 0) { +HWY_API Vec +Undefined(TLane tag = 0) +{ return hn::Undefined(_Tag()); } /// Returns N-lane vector with all lanes equal to zero. template -HWY_API Vec Zero(TLane tag = 0) { +HWY_API Vec +Zero(TLane tag = 0) +{ return hn::Zero(_Tag()); } /// Returns N-lane vector with all lanes equal to the given value of type `TLane`. 
template -HWY_API Vec Set(TLane val) { +HWY_API Vec +Set(TLane val) +{ return hn::Set(_Tag(), val); } /// Converts a mask to a vector based on the specified lane type. template -HWY_API Vec VecFromMask(const TMask &m) { +HWY_API Vec +VecFromMask(const TMask &m) +{ return hn::VecFromMask(_Tag(), m); } -/// Convert (Reinterpret) an N-lane vector to a different type without modifying the underlying data. +/// Convert (Reinterpret) an N-lane vector to a different type without modifying the +/// underlying data. template -HWY_API Vec BitCast(const TVec &v) { +HWY_API Vec +BitCast(const TVec &v) +{ return hn::BitCast(_Tag(), v); } // Import common Highway intrinsics -using hn::Eq; -using hn::Le; -using hn::Lt; -using hn::Gt; -using hn::Ge; +using hn::Abs; +using hn::Add; using hn::And; -using hn::Or; -using hn::Xor; using hn::AndNot; -using hn::Sub; -using hn::Add; -using hn::Mul; using hn::Div; -using hn::Min; +using hn::Eq; +using hn::Ge; +using hn::Gt; +using hn::Le; +using hn::Lt; using hn::Max; -using hn::Abs; +using hn::Min; +using hn::Mul; +using hn::Or; using hn::Sqrt; +using hn::Sub; +using hn::Xor; -#endif // NPY_SIMDX +#endif // NPY_SIMDX From e099bba5cba528036593321e539c6565a5e69765 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 17 Apr 2025 04:37:59 +0200 Subject: [PATCH 008/294] SIMD: Improve isolation and constexpr handling in wrapper - Add anonymous namespace around implementation to ensure each translation unit gets its own constants based on local flags - Use HWY_LANES_CONSTEXPR for Lanes function to ensure proper constexpr evaluation across platforms --- numpy/_core/src/common/simd/simd.inc.hpp | 8 +++++++- numpy/_core/src/umath/loops_trigonometric.dispatch.cpp | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp index 6d12c9a3caeb..64d28bc47118 100644 --- a/numpy/_core/src/common/simd/simd.inc.hpp +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ 
-3,6 +3,10 @@ #define NPY_SIMDX 1 // Prevent editors from graying out the happy branch #endif +// Using anonymous namespace instead of inline to ensure each translation unit +// gets its own copy of constants based on local compilation flags +namespace { + // NOTE: This file is included by simd.hpp multiple times with different namespaces // so avoid including any headers here @@ -57,7 +61,7 @@ StoreU(const Vec &a, TLane *ptr) /// Returns the number of vector lanes based on the lane type. template -HWY_API constexpr size_t +HWY_API HWY_LANES_CONSTEXPR size_t Lanes(TLane tag = 0) { return hn::Lanes(_Tag()); @@ -124,3 +128,5 @@ using hn::Sub; using hn::Xor; #endif // NPY_SIMDX + +} // namespace anonymous diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index ae696db4cd4a..9ce2571a1528 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -3,7 +3,9 @@ #include "loops_utils.h" #include "simd/simd.h" +#include "simd/simd.hpp" #include + namespace hn = hwy::HWY_NAMESPACE; /* From 1fd2076a37e35b0d2f9cf84195117f65637c185d Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 17 Apr 2025 16:13:20 +0200 Subject: [PATCH 009/294] Update Highway submodule to latest master --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 0b696633f9ad..12b325bc1793 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 0b696633f9ad89497dd5532b55eaa01625ad71ca +Subproject commit 12b325bc1793dee68ab2157995a690db859fe9e0 From 80e6a30f3efc30d067d7aa28568ff854572f8a89 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 17 Apr 2025 16:27:30 +0200 Subject: [PATCH 010/294] SIMD: Fix compile error by using MaxLanes instead of Lanes for array size Replace hn::Lanes(f64) with hn::MaxLanes(f64) when defining the 
index array size to fix error C2131: "expression did not evaluate to a constant". This error occurs because Lanes() isn't always constexpr compatible, especially with scalable vector extensions. MaxLanes() provides a compile-time constant value suitable for static array allocation and should be used with non-scalable SIMD extensions when defining fixed-size arrays. --- numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src | 2 +- numpy/_core/src/umath/loops_trigonometric.dispatch.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src index 8c66229942ee..93d288fbdb2e 100755 --- a/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src +++ b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src @@ -385,7 +385,7 @@ simd_tanh_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_ vec_f64 b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16; if constexpr(hn::MaxLanes(f64) == 2){ vec_f64 e0e1_0, e0e1_1; - uint64_t index[hn::Lanes(f64)]; + uint64_t index[hn::MaxLanes(f64)]; hn::StoreU(idx, u64, index); /**begin repeat diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index 9ce2571a1528..d298a8596cc4 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -186,7 +186,7 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, "larger than 256 bits."); simd_maski = ((uint8_t *)&simd_maski)[0]; #endif - float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::Lanes(f32)]; + float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::MaxLanes(f32)]; hn::Store(x_in, f32, ip_fback); // process elements using libc for large elements From 6519b288d66dcc7ed27cfeb664a1eb2d279d8492 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 17 May 2025 10:18:39 
+0300 Subject: [PATCH 011/294] SIMD: Rename NPY_SIMDX to NPY_HWY MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename Highway wrapper macros for clarity: - NPY_SIMDX → NPY_HWY - NPY_SIMDX_F16 → NPY_HWY_F16 - NPY_SIMDX_F64 → NPY_HWY_F64 - NPY_SIMDX_FMA → NPY_HWY_FMA To avoids confusion with legacy SIMD macros. --- numpy/_core/src/common/simd/README.md | 16 ++++++++-------- numpy/_core/src/common/simd/simd.hpp | 22 +++++++++++----------- numpy/_core/src/common/simd/simd.inc.hpp | 12 ++++++------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md index 9a68d1aa1bfc..e69bab0e7ba5 100644 --- a/numpy/_core/src/common/simd/README.md +++ b/numpy/_core/src/common/simd/README.md @@ -45,19 +45,19 @@ StoreU(v128, data); #include "simd/simd.hpp" // Check if SIMD is enabled -#if NPY_SIMDX +#if NPY_HWY // SIMD code #else // Scalar fallback code #endif // Check for float64 support -#if NPY_SIMDX_F64 +#if NPY_HWY_F64 // Use float64 SIMD operations #endif // Check for FMA support -#if NPY_SIMDX_FMA +#if NPY_HWY_FMA // Use FMA operations #endif ``` @@ -70,9 +70,9 @@ The wrapper provides type constraints to help with SFINAE (Substitution Failure ```cpp // Base template - always defined, even when SIMD is not enabled (for SFINAE) template - constexpr bool kSupportLane = NPY_SIMDX != 0; + constexpr bool kSupportLane = NPY_HWY != 0; template <> - constexpr bool kSupportLane = NPY_SIMDX_F64 != 0; + constexpr bool kSupportLane = NPY_HWY_F64 != 0; ``` - `kMaxLanes`: Maximum number of lanes supported by the SIMD extension for the specified lane type. @@ -175,15 +175,15 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d 2. 
**Legacy Universal Intrinsics** - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated - - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_SIMDX` macros) + - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_HWY` macros) - The legacy code is maintained for compatibility but will eventually be removed 3. **Feature Detection Constants vs. Highway Constants** - - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants + - NumPy-specific constants (`NPY_HWY_F16`, `NPY_HWY_F64`, `NPY_HWY_FMA`) provide additional safety beyond raw Highway constants - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration - Our constants combine both checks: ```cpp - #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) + #define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) ``` - This ensures SIMD features won't be used when: - Platform supports it but NumPy optimization is disabled via meson option: diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp index 698da4adf865..c4e572dc8d0f 100644 --- a/numpy/_core/src/common/simd/simd.hpp +++ b/numpy/_core/src/common/simd/simd.hpp @@ -10,7 +10,7 @@ */ /** * Since `NPY_SIMD` is only limited to NumPy C universal intrinsics, - * `NPY_SIMDX` is defined to indicate the SIMD availability for Google's Highway + * `NPY_HWY` is defined to indicate the SIMD availability for Google's Highway * C++ code. * * Highway SIMD is only available when optimization is enabled. @@ -29,31 +29,31 @@ * * Therefore, we only enable SIMD when the Highway target is not scalar. */ -#define NPY_SIMDX (HWY_TARGET != HWY_SCALAR) +#define NPY_HWY (HWY_TARGET != HWY_SCALAR) // Indicates if the SIMD operations are available for float16. 
-#define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) +#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) // Note: Highway requires SIMD extentions with native float32 support, so we don't need // to check for it. // Indicates if the SIMD operations are available for float64. -#define NPY_SIMDX_F64 (NPY_SIMDX && HWY_HAVE_FLOAT64) +#define NPY_HWY_F64 (NPY_HWY && HWY_HAVE_FLOAT64) // Indicates if the SIMD floating operations are natively supports fma. -#define NPY_SIMDX_FMA (NPY_SIMDX && HWY_NATIVE_FMA) +#define NPY_HWY_FMA (NPY_HWY && HWY_NATIVE_FMA) #else -#define NPY_SIMDX 0 -#define NPY_SIMDX_F16 0 -#define NPY_SIMDX_F64 0 -#define NPY_SIMDX_FMA 0 +#define NPY_HWY 0 +#define NPY_HWY_F16 0 +#define NPY_HWY_F64 0 +#define NPY_HWY_FMA 0 #endif namespace np { /// Represents the max SIMD width supported by the platform. namespace simd { -#if NPY_SIMDX +#if NPY_HWY /// The highway namespace alias. /// We can not import all the symbols from the HWY_NAMESPACE because it will /// conflict with the existing symbols in the numpy namespace. @@ -67,7 +67,7 @@ using _Tag = hn::ScalableTag; /// Represents the 128-bit SIMD width. namespace simd128 { -#if NPY_SIMDX +#if NPY_HWY namespace hn = hwy::HWY_NAMESPACE; template using _Tag = hn::Full128; diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp index 64d28bc47118..f4a2540927dd 100644 --- a/numpy/_core/src/common/simd/simd.inc.hpp +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ -1,6 +1,6 @@ -#ifndef NPY_SIMDX +#ifndef NPY_HWY #error "This is not a standalone header. Include simd.hpp instead." -#define NPY_SIMDX 1 // Prevent editors from graying out the happy branch +#define NPY_HWY 1 // Prevent editors from graying out the happy branch #endif // Using anonymous namespace instead of inline to ensure each translation unit @@ -17,9 +17,9 @@ namespace { * @tparam TLane The lane type to check for support. 
*/ template -constexpr bool kSupportLane = NPY_SIMDX != 0; +constexpr bool kSupportLane = NPY_HWY != 0; -#if NPY_SIMDX +#if NPY_HWY // Define lane type support based on Highway capabilities template <> constexpr bool kSupportLane = HWY_HAVE_FLOAT16 != 0; @@ -127,6 +127,6 @@ using hn::Sqrt; using hn::Sub; using hn::Xor; -#endif // NPY_SIMDX +#endif // NPY_HWY -} // namespace anonymous +} // namespace From 275e45ccc3c59759a77306031f3ca66601a70807 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 19 May 2025 18:27:58 +0300 Subject: [PATCH 012/294] Disable Highway EMU128 scalar emulation Skip Highway's EMU128 in favor of NumPy's scalar implementations for due to strict IEEE 754 floating-point compliance requirements --- numpy/_core/src/common/simd/README.md | 3 +++ numpy/_core/src/common/simd/simd.hpp | 18 ++++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md index e69bab0e7ba5..a13a0f75b6fc 100644 --- a/numpy/_core/src/common/simd/README.md +++ b/numpy/_core/src/common/simd/README.md @@ -165,6 +165,7 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d - When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled - SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`) + and not EMU128 (`HWY_TARGET != HWY_EMU128`) ## Design Notes @@ -172,6 +173,8 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d - NumPy already provides kernels for scalar operations - Compilers can better optimize standard library implementations - Not all Highway intrinsics are fully supported in scalar mode + - For strict IEEE 754 floating-point compliance requirements, direct scalar + implementations offer more predictable behavior than EMU128 2. 
**Legacy Universal Intrinsics** - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp index c4e572dc8d0f..40556a68c59d 100644 --- a/numpy/_core/src/common/simd/simd.hpp +++ b/numpy/_core/src/common/simd/simd.hpp @@ -22,14 +22,20 @@ /** * We avoid using Highway scalar operations for the following reasons: - * 1. We already provide kernels for scalar operations, so falling back to - * the NumPy implementation is more appropriate. Compilers can often - * optimize these better since they rely on standard libraries. - * 2. Not all Highway intrinsics are fully supported in scalar mode. * - * Therefore, we only enable SIMD when the Highway target is not scalar. + * 1. NumPy already provides optimized kernels for scalar operations. Using these + * existing implementations is more consistent with NumPy's architecture and + * allows for compiler optimizations specific to standard library calls. + * + * 2. Not all Highway intrinsics are fully supported in scalar mode, which could + * lead to compilation errors or unexpected behavior for certain operations. + * + * 3. For NumPy's strict IEEE 754 floating-point compliance requirements, direct scalar + * implementations offer more predictable behavior than EMU128. + * + * Therefore, we only enable Highway SIMD when targeting actual SIMD instruction sets. */ -#define NPY_HWY (HWY_TARGET != HWY_SCALAR) +#define NPY_HWY ((HWY_TARGET != HWY_SCALAR) && (HWY_TARGET != HWY_EMU128)) // Indicates if the SIMD operations are available for float16. 
#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) From 530409d43dbfa27df91ad26752a2cc42d6cf7d37 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 19 May 2025 20:45:51 -0400 Subject: [PATCH 013/294] CI: update cibuildwheel to 3.0.0b1 and enable cp314 [wheel build] --- .github/workflows/wheels.yml | 7 +++++-- tools/wheels/cibw_test_command.sh | 4 ++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e96021775f3c..f502e6d64d1f 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -92,7 +92,7 @@ jobs: - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311", "cp312", "cp313", "cp313t", "pp311"] + python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] @@ -107,6 +107,8 @@ jobs: python: "pp311" - buildplat: [ macos13, macosx_x86_64, openblas ] python: "cp313t" + - buildplat: [ macos13, macosx_x86_64, openblas ] + python: "cp314t" env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} @@ -175,9 +177,10 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # v2.23.3 + uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} + CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 60e90ef5beb6..45dbc8a102cf 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,6 +4,10 @@ set -xe PROJECT_DIR="$1" +if [ -d tools ]; then + cd tools +fi + python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From d68ba1ebfc356c4d3df8e4996d76035148573de5 Mon Sep 
17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 May 2025 08:55:52 -0400 Subject: [PATCH 014/294] MNT: respond to code review [wheel build] --- .github/workflows/wheels.yml | 3 ++- pyproject.toml | 2 +- tools/wheels/cibw_test_command.sh | 4 +--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f502e6d64d1f..bede99dd6e9a 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -180,7 +180,8 @@ jobs: uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy + # delete when we switch 3.14(t)-dev to 3.14(t) + CIBW_ENABLE: cpython-prerelease - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: diff --git a/pyproject.toml b/pyproject.toml index b62d71cbba73..b1eba50044fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,7 +148,7 @@ before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" -enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +enable = ["cpython-freethreading", "pypy"] [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 45dbc8a102cf..4763ad512b22 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,9 +4,7 @@ set -xe PROJECT_DIR="$1" -if [ -d tools ]; then - cd tools -fi +export PYTHONSAFEPATH=1 python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From 198cc9aade98ef880d4d09315aef9c565ea69b52 Mon Sep 17 00:00:00 2001 From: 
Nathan Goldbaum Date: Tue, 20 May 2025 09:08:21 -0400 Subject: [PATCH 015/294] MNT: move back to 'cd tools' hack [wheel build] --- tools/wheels/cibw_test_command.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 4763ad512b22..2d39687a861b 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,7 +4,9 @@ set -xe PROJECT_DIR="$1" -export PYTHONSAFEPATH=1 +if [ -d tools ]; then + cd tools +fi python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From 084f183a66cdaa888205ffeb599aff6c083178cc Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 20 May 2025 16:11:29 -0600 Subject: [PATCH 016/294] BEG, MAINT: Begin NumPy 2.4.0 development. - Create 2.4.0-notes.rst - Update release.rst - Update pavement.py - Update pyproject.toml - Update cversions.txt - Update numpyconfig.h - Delete release fragments --- doc/release/upcoming_changes/26018.change.rst | 7 ----- .../upcoming_changes/26018.performance.rst | 7 ----- .../upcoming_changes/26745.highlight.rst | 10 ------- .../upcoming_changes/27288.improvement.rst | 3 -- .../upcoming_changes/27789.new_function.rst | 5 ---- doc/release/upcoming_changes/27883.c_api.rst | 4 --- doc/release/upcoming_changes/27883.change.rst | 17 ----------- doc/release/upcoming_changes/27998.c_api.rst | 10 ------- doc/release/upcoming_changes/28080.c_api.rst | 1 - .../upcoming_changes/28080.improvement.rst | 2 -- doc/release/upcoming_changes/28102.change.rst | 6 ---- .../upcoming_changes/28105.improvement.rst | 2 -- .../upcoming_changes/28129.deprecation.rst | 4 --- .../upcoming_changes/28205.improvement.rst | 6 ---- .../upcoming_changes/28214.new_feature.rst | 23 --------------- .../upcoming_changes/28250.improvement.rst | 3 -- .../upcoming_changes/28254.expired.rst | 29 ------------------- doc/release/upcoming_changes/28343.change.rst | 1 - 
doc/release/upcoming_changes/28426.change.rst | 6 ---- doc/release/upcoming_changes/28436.change.rst | 10 ------- .../upcoming_changes/28442.improvement.rst | 1 - doc/release/upcoming_changes/28569.change.rst | 2 -- .../upcoming_changes/28576.new_feature.rst | 15 ---------- doc/release/upcoming_changes/28615.change.rst | 5 ---- .../upcoming_changes/28619.highlight.rst | 6 ---- .../upcoming_changes/28619.performance.rst | 7 ----- .../upcoming_changes/28669.new_feature.rst | 3 -- doc/release/upcoming_changes/28703.change.rst | 3 -- doc/release/upcoming_changes/28713.change.rst | 1 - doc/release/upcoming_changes/28741.change.rst | 1 - .../upcoming_changes/28769.performance.rst | 8 ----- .../upcoming_changes/28856.improvement.rst | 5 ---- .../upcoming_changes/28884.deprecation.rst | 28 ------------------ .../upcoming_changes/28940.new_feature.rst | 6 ---- .../upcoming_changes/28961.expired.rst | 1 - doc/source/release.rst | 1 + doc/source/release/2.4.0-notes.rst | 19 ++++++++++++ numpy/_core/code_generators/cversions.txt | 3 +- numpy/_core/include/numpy/numpyconfig.h | 2 +- pavement.py | 2 +- pyproject.toml | 2 +- 41 files changed, 25 insertions(+), 252 deletions(-) delete mode 100644 doc/release/upcoming_changes/26018.change.rst delete mode 100644 doc/release/upcoming_changes/26018.performance.rst delete mode 100644 doc/release/upcoming_changes/26745.highlight.rst delete mode 100644 doc/release/upcoming_changes/27288.improvement.rst delete mode 100644 doc/release/upcoming_changes/27789.new_function.rst delete mode 100644 doc/release/upcoming_changes/27883.c_api.rst delete mode 100644 doc/release/upcoming_changes/27883.change.rst delete mode 100644 doc/release/upcoming_changes/27998.c_api.rst delete mode 100644 doc/release/upcoming_changes/28080.c_api.rst delete mode 100644 doc/release/upcoming_changes/28080.improvement.rst delete mode 100644 doc/release/upcoming_changes/28102.change.rst delete mode 100644 doc/release/upcoming_changes/28105.improvement.rst delete mode 
100644 doc/release/upcoming_changes/28129.deprecation.rst delete mode 100644 doc/release/upcoming_changes/28205.improvement.rst delete mode 100644 doc/release/upcoming_changes/28214.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28250.improvement.rst delete mode 100644 doc/release/upcoming_changes/28254.expired.rst delete mode 100644 doc/release/upcoming_changes/28343.change.rst delete mode 100644 doc/release/upcoming_changes/28426.change.rst delete mode 100644 doc/release/upcoming_changes/28436.change.rst delete mode 100644 doc/release/upcoming_changes/28442.improvement.rst delete mode 100644 doc/release/upcoming_changes/28569.change.rst delete mode 100644 doc/release/upcoming_changes/28576.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28615.change.rst delete mode 100644 doc/release/upcoming_changes/28619.highlight.rst delete mode 100644 doc/release/upcoming_changes/28619.performance.rst delete mode 100644 doc/release/upcoming_changes/28669.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28703.change.rst delete mode 100644 doc/release/upcoming_changes/28713.change.rst delete mode 100644 doc/release/upcoming_changes/28741.change.rst delete mode 100644 doc/release/upcoming_changes/28769.performance.rst delete mode 100644 doc/release/upcoming_changes/28856.improvement.rst delete mode 100644 doc/release/upcoming_changes/28884.deprecation.rst delete mode 100644 doc/release/upcoming_changes/28940.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28961.expired.rst create mode 100644 doc/source/release/2.4.0-notes.rst diff --git a/doc/release/upcoming_changes/26018.change.rst b/doc/release/upcoming_changes/26018.change.rst deleted file mode 100644 index 9d7c139be183..000000000000 --- a/doc/release/upcoming_changes/26018.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -``unique_values`` may return unsorted data ------------------------------------------- -The relatively new function (added in NumPy 2.0) 
``unique_values`` may now -return unsorted results. Just as ``unique_counts`` and ``unique_all`` -these never guaranteed a sorted result, however, the result -was sorted until now. In cases where these do return a sorted result, this -may change in future releases to improve performance. diff --git a/doc/release/upcoming_changes/26018.performance.rst b/doc/release/upcoming_changes/26018.performance.rst deleted file mode 100644 index ffeab51dbdf6..000000000000 --- a/doc/release/upcoming_changes/26018.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.unique`` ------------------------------------------ -``np.unique`` now tries to use a hash table to find unique values instead of sorting -values before finding unique values. This is limited to certain dtypes for now, and -the function is now faster for those dtypes. The function now also exposes a ``sorted`` -parameter to allow returning unique values as they were found, instead of sorting them -afterwards. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26745.highlight.rst b/doc/release/upcoming_changes/26745.highlight.rst deleted file mode 100644 index 5636f919c80d..000000000000 --- a/doc/release/upcoming_changes/26745.highlight.rst +++ /dev/null @@ -1,10 +0,0 @@ -Interactive examples in the NumPy documentation ------------------------------------------------ - -The NumPy documentation includes a number of examples that -can now be run interactively in your browser using WebAssembly -and Pyodide. - -Please note that the examples are currently experimental in -nature and may not work as expected for all methods in the -public API. 
diff --git a/doc/release/upcoming_changes/27288.improvement.rst b/doc/release/upcoming_changes/27288.improvement.rst deleted file mode 100644 index c7319554c63f..000000000000 --- a/doc/release/upcoming_changes/27288.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Scalar comparisons between non-comparable dtypes such as - `np.array(1) == np.array('s')` now return a NumPy bool instead of - a Python bool. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27789.new_function.rst b/doc/release/upcoming_changes/27789.new_function.rst deleted file mode 100644 index 734a0c3bc2b5..000000000000 --- a/doc/release/upcoming_changes/27789.new_function.rst +++ /dev/null @@ -1,5 +0,0 @@ -New function `numpy.strings.slice` ----------------------------------- -The new function `numpy.strings.slice` was added, which implements fast -native slicing of string arrays. It supports the full slicing API including -negative slice offsets and steps. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.c_api.rst b/doc/release/upcoming_changes/27883.c_api.rst deleted file mode 100644 index 107e0036c5c2..000000000000 --- a/doc/release/upcoming_changes/27883.c_api.rst +++ /dev/null @@ -1,4 +0,0 @@ -* `NpyIter_GetTransferFlags` is now available to check if - the iterator needs the Python API or if casts may cause floating point - errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` - to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.change.rst b/doc/release/upcoming_changes/27883.change.rst deleted file mode 100644 index ea68771efba3..000000000000 --- a/doc/release/upcoming_changes/27883.change.rst +++ /dev/null @@ -1,17 +0,0 @@ -Changes to the main iterator and potential numerical changes ------------------------------------------------------------- -The main iterator, used in math functions and via ``np.nditer`` from Python -and ``NpyIter`` in C, now behaves differently for some buffered iterations. -This means that: - -* The buffer size used will often be smaller than the maximum buffer sized - allowed by the ``buffersize`` parameter. -* The "growinner" flag is now honored with buffered reductions when no operand - requires buffering. - -For ``np.sum()`` such changes in buffersize may slightly change numerical -results of floating point operations. -Users who use "growinner" for custom reductions could notice -changes in precision (for example, in NumPy we removed it from -``einsum`` to avoid most precision changes and improve precision -for some 64bit floating point inputs). diff --git a/doc/release/upcoming_changes/27998.c_api.rst b/doc/release/upcoming_changes/27998.c_api.rst deleted file mode 100644 index edc6371af1f9..000000000000 --- a/doc/release/upcoming_changes/27998.c_api.rst +++ /dev/null @@ -1,10 +0,0 @@ -New `NpyIter_GetTransferFlags` and ``NpyIter_IterationNeedsAPI`` change ------------------------------------------------------------------------ -NumPy now has the new `NpyIter_GetTransferFlags` function as a more precise -way checking of iterator/buffering needs. I.e. whether the Python API/GIL is -required or floating point errors may occur. -This function is also faster if you already know your needs without buffering. - -The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were -previously performed at setup time. 
While it was never necessary to call it -multiple times, doing so will now have a larger cost. diff --git a/doc/release/upcoming_changes/28080.c_api.rst b/doc/release/upcoming_changes/28080.c_api.rst deleted file mode 100644 index f72be7ef52fe..000000000000 --- a/doc/release/upcoming_changes/28080.c_api.rst +++ /dev/null @@ -1 +0,0 @@ -* ``NpyIter`` now has no limit on the number of operands it supports. diff --git a/doc/release/upcoming_changes/28080.improvement.rst b/doc/release/upcoming_changes/28080.improvement.rst deleted file mode 100644 index 19b85ae3c96a..000000000000 --- a/doc/release/upcoming_changes/28080.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``np.nditer`` now has no limit on the number of supported operands - (C-integer). diff --git a/doc/release/upcoming_changes/28102.change.rst b/doc/release/upcoming_changes/28102.change.rst deleted file mode 100644 index bd54378a652e..000000000000 --- a/doc/release/upcoming_changes/28102.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -The minimum supported GCC version is now 9.3.0 ----------------------------------------------- -The minimum supported version was updated from 8.4.0 to 9.3.0, -primarily in order to reduce the chance of platform-specific bugs in old GCC -versions from causing issues. - diff --git a/doc/release/upcoming_changes/28105.improvement.rst b/doc/release/upcoming_changes/28105.improvement.rst deleted file mode 100644 index 537467575234..000000000000 --- a/doc/release/upcoming_changes/28105.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* No-copy pickling is now supported for any - array that can be transposed to a C-contiguous array. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/28129.deprecation.rst b/doc/release/upcoming_changes/28129.deprecation.rst deleted file mode 100644 index b1beb0c5cca3..000000000000 --- a/doc/release/upcoming_changes/28129.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic - static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` - section of your mypy configuration. If this change results in new errors being - reported, kindly open an issue. diff --git a/doc/release/upcoming_changes/28205.improvement.rst b/doc/release/upcoming_changes/28205.improvement.rst deleted file mode 100644 index 42eaaac98363..000000000000 --- a/doc/release/upcoming_changes/28205.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Added warnings to `np.isclose` ---------------------------------- -Added warning messages if at least one of atol or rtol are -either `np.nan` or `np.inf` within `np.isclose` - -* Warnings follow the user's `np.seterr` settings diff --git a/doc/release/upcoming_changes/28214.new_feature.rst b/doc/release/upcoming_changes/28214.new_feature.rst deleted file mode 100644 index eb95a0739e79..000000000000 --- a/doc/release/upcoming_changes/28214.new_feature.rst +++ /dev/null @@ -1,23 +0,0 @@ -NumPy now registers its pkg-config paths with the pkgconf_ PyPI package ------------------------------------------------------------------------ - -The pkgconf_ PyPI package provides an interface for projects like NumPy to -register their own paths to be added to the pkg-config search path. This means -that when using pkgconf_ from PyPI, NumPy will be discoverable without needing -for any custom environment configuration. - -.. attention:: Attention - - This only applies when using the pkgconf_ package from PyPI_, or put another - way, this only applies when installing pkgconf_ via a Python package - manager. 
- - If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or - any other source that does not use the pkgconf-pypi_ project, the NumPy - pkg-config directory will not be automatically added to the search path. In - these situations, you might want to use ``numpy-config``. - - -.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi -.. _PyPI: https://pypi.org/ -.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi diff --git a/doc/release/upcoming_changes/28250.improvement.rst b/doc/release/upcoming_changes/28250.improvement.rst deleted file mode 100644 index 703a8bb0c2e1..000000000000 --- a/doc/release/upcoming_changes/28250.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the - custom dtype over a more generic name constructed from its ``kind`` and - ``itemsize``. diff --git a/doc/release/upcoming_changes/28254.expired.rst b/doc/release/upcoming_changes/28254.expired.rst deleted file mode 100644 index 5f391eb6cbe2..000000000000 --- a/doc/release/upcoming_changes/28254.expired.rst +++ /dev/null @@ -1,29 +0,0 @@ -* Remove deprecated macros like ``NPY_OWNDATA`` from cython interfaces in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove alias ``generate_divbyzero_error`` to ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to ``npy_set_floatstatus_overflow`` (deprecated since 1.10) -* Remove ``np.tostring`` (deprecated since 1.19) -* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) -* Raise when using ``np.bincount(...minlength=None)``, use 0 instead (deprecated since 1.14) -* Passing ``shape=None`` to functions with a non-optional shape argument errors, use ``()`` instead (deprecated since 1.20) -* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 
1.20) -* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) -* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they would guess (deprecated since 1.18) -* ``datetime64`` and ``timedelta64`` construction with a tuple no longer accepts an ``event`` value, either use a two-tuple of (unit, num) or a 4-tuple of (unit, num, den, 1) (deprecated since 1.14) -* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that attribute must be a dtype-instance rather than a thing that can be parsed as a dtype instance (deprecated in 1.19). At some point the whole construct of using a dtype attribute will be deprecated (see #25306) -* Passing booleans as partition index errors (deprecated since 1.23) -* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) -* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) -* Disallow make a non-writeable array writeable for arrays with a base that do not own their data (deprecated since 1.17) -* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, not ``unsafe`` (deprecated since 1.20) -* Unpickling a scalar with object dtype errors (deprecated since 1.20) -* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead (deprecated since 1.14) -* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated since 1.19) -* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since 1.19) -* The Python built-in ``round`` errors for complex scalars. Use ``np.round`` or ``scalar.round`` instead (deprecated since 1.19) -* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) -* Parsing an integer via a float string is no longer supported. (deprecated since 1.23) To avoid this error you can - * make sure the original data is stored as integers. - * use the ``converters=float`` keyword argument. 
- * Use ``np.loadtxt(...).astype(np.int64)`` -* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` or fill the tuple with ``None`` (deprecated since 1.19) -* Special handling of matrix is in np.outer is removed. Convert to a ndarray via ``matrix.A`` (deprecated since 1.20) diff --git a/doc/release/upcoming_changes/28343.change.rst b/doc/release/upcoming_changes/28343.change.rst deleted file mode 100644 index 378ef775b62e..000000000000 --- a/doc/release/upcoming_changes/28343.change.rst +++ /dev/null @@ -1 +0,0 @@ -* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` now always returns zero for empty arrays. Empty arrays have at least one axis of size zero. This affects `np.linalg.norm`, `np.linalg.vector_norm`, and `np.linalg.matrix_norm`. Previously, NumPy would raises errors or return zero depending on the shape of the array. diff --git a/doc/release/upcoming_changes/28426.change.rst b/doc/release/upcoming_changes/28426.change.rst deleted file mode 100644 index d1c48640eed0..000000000000 --- a/doc/release/upcoming_changes/28426.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -Changes to automatic bin selection in numpy.histogram ------------------------------------------------------ -The automatic bin selection algorithm in ``numpy.histogram`` has been modified -to avoid out-of-memory errors for samples with low variation. -For full control over the selected bins the user can use set -the ``bin`` or ``range`` parameters of ``numpy.histogram``. 
diff --git a/doc/release/upcoming_changes/28436.change.rst b/doc/release/upcoming_changes/28436.change.rst deleted file mode 100644 index 60149e55a4d0..000000000000 --- a/doc/release/upcoming_changes/28436.change.rst +++ /dev/null @@ -1,10 +0,0 @@ -Build manylinux_2_28 wheels ---------------------------- - -Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the ``manylinux2014`` tag), which means -dropping support for redhat7/centos7, amazonlinux2, debian9, ubuntu18.04, and -other pre-glibc2.28 operating system versions, as per the `PEP 600 support -table`_. - -.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check - diff --git a/doc/release/upcoming_changes/28442.improvement.rst b/doc/release/upcoming_changes/28442.improvement.rst deleted file mode 100644 index 16d71bde19c5..000000000000 --- a/doc/release/upcoming_changes/28442.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -* ``np.dot`` now reports floating point exceptions. diff --git a/doc/release/upcoming_changes/28569.change.rst b/doc/release/upcoming_changes/28569.change.rst deleted file mode 100644 index f9d26fda0484..000000000000 --- a/doc/release/upcoming_changes/28569.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* A spelling error in the error message returned when converting a string to a float with the - method ``np.format_float_positional`` has been fixed. diff --git a/doc/release/upcoming_changes/28576.new_feature.rst b/doc/release/upcoming_changes/28576.new_feature.rst deleted file mode 100644 index 2c50887a49f2..000000000000 --- a/doc/release/upcoming_changes/28576.new_feature.rst +++ /dev/null @@ -1,15 +0,0 @@ -Allow ``out=...`` in ufuncs to ensure array result --------------------------------------------------- -NumPy has the sometimes difficult behavior that it currently usually -returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). -This is especially problematic for non-numerical dtypes (e.g. 
``object``). - -For ufuncs (i.e. most simple math functions) it is now possible -to use ``out=...`` (literally `...`, e.g. ``out=Ellipsis``) which is identical in behavior to ``out`` not -being passed, but will ensure a non-scalar return. -This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` -also ensures a non-scalar return. - -Other functions with an ``out=`` kwarg should gain support eventually. -Downstream libraries that interoperate via ``__array_ufunc__`` or -``__array_function__`` may need to adapt to support this. diff --git a/doc/release/upcoming_changes/28615.change.rst b/doc/release/upcoming_changes/28615.change.rst deleted file mode 100644 index 58b751e40704..000000000000 --- a/doc/release/upcoming_changes/28615.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. -* `numpy.count_nonzero` for ``axis=None`` (default) now returns a NumPy scalar - instead of a Python integer. -* The parameter ``axis`` in `numpy.take_along_axis` function has now a default - value of ``-1``. diff --git a/doc/release/upcoming_changes/28619.highlight.rst b/doc/release/upcoming_changes/28619.highlight.rst deleted file mode 100644 index 6c296b92899e..000000000000 --- a/doc/release/upcoming_changes/28619.highlight.rst +++ /dev/null @@ -1,6 +0,0 @@ -Building NumPy with OpenMP Parallelization -------------------------------------------- -NumPy now supports OpenMP parallel processing capabilities when built with the -``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. -When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for -parallel thread execution, improving performance for these operations. 
diff --git a/doc/release/upcoming_changes/28619.performance.rst b/doc/release/upcoming_changes/28619.performance.rst deleted file mode 100644 index 904decbe0ba6..000000000000 --- a/doc/release/upcoming_changes/28619.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.sort`` and ``np.argsort`` ----------------------------------------------------------- -``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel -thread execution, resulting in up to 3.5x speedups on x86 architectures with -AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built -with the -Denable_openmp Meson flag. Users can control the number of threads -used by setting the OMP_NUM_THREADS environment variable. diff --git a/doc/release/upcoming_changes/28669.new_feature.rst b/doc/release/upcoming_changes/28669.new_feature.rst deleted file mode 100644 index 2953a5123ccc..000000000000 --- a/doc/release/upcoming_changes/28669.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. - This way, static type-checkers will infer ``dtype: np.dtype`` as - ``dtype: np.dtype[Any]``, without reporting an error. diff --git a/doc/release/upcoming_changes/28703.change.rst b/doc/release/upcoming_changes/28703.change.rst deleted file mode 100644 index 87bb431951f9..000000000000 --- a/doc/release/upcoming_changes/28703.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Printing of ``np.float16`` and ``np.float32`` scalars and arrays have been improved by - adjusting the transition to scientific notation based on the floating point precision. - A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. 
diff --git a/doc/release/upcoming_changes/28713.change.rst b/doc/release/upcoming_changes/28713.change.rst deleted file mode 100644 index 5e5c5adde88b..000000000000 --- a/doc/release/upcoming_changes/28713.change.rst +++ /dev/null @@ -1 +0,0 @@ -Remove use of -Wl,-ld_classic on macOS. This hack is no longer needed by Spack, and results in libraries that cannot link to other libraries built with ld (new). diff --git a/doc/release/upcoming_changes/28741.change.rst b/doc/release/upcoming_changes/28741.change.rst deleted file mode 100644 index ca9531f490d8..000000000000 --- a/doc/release/upcoming_changes/28741.change.rst +++ /dev/null @@ -1 +0,0 @@ -Re-enable overriding functions in the :mod:`numpy.strings` module. diff --git a/doc/release/upcoming_changes/28769.performance.rst b/doc/release/upcoming_changes/28769.performance.rst deleted file mode 100644 index 7fb8f02282f6..000000000000 --- a/doc/release/upcoming_changes/28769.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -Performance improvements for ``np.float16`` casts --------------------------------------------------- -Earlier, floating point casts to and from ``np.float16`` types -were emulated in software on all platforms. - -Now, on ARM devices that support Neon float16 intrinsics (such as -recent Apple Silicon), the native float16 path is used to achieve -the best performance. diff --git a/doc/release/upcoming_changes/28856.improvement.rst b/doc/release/upcoming_changes/28856.improvement.rst deleted file mode 100644 index 83911035f097..000000000000 --- a/doc/release/upcoming_changes/28856.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -* ``np.dtypes.StringDType`` is now a - `generic type `_ which - accepts a type argument for ``na_object`` that defaults to ``typing.Never``. - For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, - and ``StringDType()`` returns a ``StringDType[typing.Never]``. 
diff --git a/doc/release/upcoming_changes/28884.deprecation.rst b/doc/release/upcoming_changes/28884.deprecation.rst deleted file mode 100644 index c1be55fb0dd3..000000000000 --- a/doc/release/upcoming_changes/28884.deprecation.rst +++ /dev/null @@ -1,28 +0,0 @@ -``numpy.typing.NBitBase`` deprecation -------------------------------------- -The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in a future version. - -This type was previously intended to be used as a generic upper bound for type-parameters, for example: - -.. code-block:: python - - import numpy as np - import numpy.typing as npt - - def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... - -But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete subtypes, causing static type-checkers to reject ``x: np.float64 = f(np.complex128(42j))``. - -So instead, the better approach is to use ``typing.overload``: - -.. code-block:: python - - import numpy as np - from typing import overload - - @overload - def f(x: np.complex64) -> np.float32: ... - @overload - def f(x: np.complex128) -> np.float64: ... - @overload - def f(x: np.clongdouble) -> np.longdouble: ... diff --git a/doc/release/upcoming_changes/28940.new_feature.rst b/doc/release/upcoming_changes/28940.new_feature.rst deleted file mode 100644 index e0d3dc8888c3..000000000000 --- a/doc/release/upcoming_changes/28940.new_feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -* Static type-checkers now interpret: - - - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. - - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. - - This is because their type parameters now have default values. 
diff --git a/doc/release/upcoming_changes/28961.expired.rst b/doc/release/upcoming_changes/28961.expired.rst deleted file mode 100644 index 92031de35e62..000000000000 --- a/doc/release/upcoming_changes/28961.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Removed the ``np.compat`` package source code (removed in 2.0) diff --git a/doc/source/release.rst b/doc/source/release.rst index 36d5e6731f4f..6c6a853b06f5 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.4.0 2.3.0 2.2.6 2.2.5 diff --git a/doc/source/release/2.4.0-notes.rst b/doc/source/release/2.4.0-notes.rst new file mode 100644 index 000000000000..29a7e5ce6073 --- /dev/null +++ b/doc/source/release/2.4.0-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.4.0 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. 
include:: notes-towncrier.rst diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index d448df066a19..0d642d760b21 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -78,5 +78,6 @@ # Version 19 (NumPy 2.1.0) Only header additions # Version 19 (NumPy 2.2.0) No change 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 -# Version 19 (NumPy 2.3.0) +# Version 20 (NumPy 2.3.0) +# Version 20 (NumPy 2.4.0) No change 0x00000014 = e56b74d32a934d085e7c3414cb9999b8, diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index ba44c28b9d0f..52d7e2b5d7d7 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -172,7 +172,7 @@ #define NPY_FEATURE_VERSION_STRING "2.0" #elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.1" -#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION /* also 2.4 */ #define NPY_FEATURE_VERSION_STRING "2.3" #else #error "Missing version string define for new NumPy version." 
diff --git a/pavement.py b/pavement.py index e00b9647f5e3..369b8703b0ba 100644 --- a/pavement.py +++ b/pavement.py @@ -35,7 +35,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.3.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.4.0-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index b62d71cbba73..5cf75b20a6b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.3.0.dev0" +version = "2.4.0.dev0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From bae7d087c54f5a4ce25f4906b8fcefc92fed799c Mon Sep 17 00:00:00 2001 From: Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Date: Wed, 21 May 2025 20:50:31 +0100 Subject: [PATCH 017/294] Fix workflow error --- .github/workflows/windows.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index f61419df09cd..6c02563150da 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -35,7 +35,7 @@ jobs: persist-credentials: false - name: Setup Python - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: ${{ matrix.compiler-pyversion[1] }} From 54529e22743cffe6f997b725a7fcff650736395b Mon Sep 17 00:00:00 2001 From: Angus Gibson Date: Thu, 22 May 2025 11:42:18 +1000 Subject: [PATCH 018/294] BUG: Avoid compile errors in f2py modules Some of the casts from cfuncs pass PyObject* to PyArrayObject*, which causes compile errors due to incompatible pointer types on at least GCC 14. 
--- numpy/f2py/cfuncs.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index a9a56b2455f2..6c48c1ef0175 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -1047,9 +1047,12 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) { - (*v) = *((npy_longdouble *)PyArray_DATA(obj)); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } } } if (double_from_pyobj(&d, obj, errmess)) { @@ -1131,10 +1134,13 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); - (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } } } if (complex_double_from_pyobj(&cd,obj,errmess)) { From ce6a72c41efe545f68dcd831e6193fcdd450180b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:09:04 +0200 Subject: [PATCH 019/294] DOC: Expand/clean up extension module import error We get a lot of these reports and it is never us... But unfortunately, Python (currently) doesn't report *why* the module wasn't found. (I have opened an issue asking for that.) Until Python does, try to figure it out ourselves, i.e. list C modules (I guess its always one, but OK). If anything it'll give *us* an immediate thing to point out if an issue is reported... 
I also hid the "source import" thing to only occur if __config__ doesn't exist. Not sure that catches this fully, but I also feel like this isn't an actual problem anymore (i.e. we could just delete it also). Tested locally by renaming or deleting `_multiarray_umath`. --- numpy/__init__.py | 10 ++++++---- numpy/_core/__init__.py | 43 +++++++++++++++++++++++++++++++++-------- 2 files changed, 41 insertions(+), 12 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 8fb2e742dfc4..3a67bd221247 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -111,10 +111,12 @@ try: from numpy.__config__ import show_config except ImportError as e: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) from e + if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__": + # The __config__ module itself was not found, so add this info: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e from . import _core from ._core import ( diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index d0da7e0ad9ed..f19557277756 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -22,29 +22,56 @@ from . import multiarray except ImportError as exc: import sys - msg = """ + + # Basically always, the problem should be that the C module is wrong/missing... 
+ if isinstance(exc, ModuleNotFoundError) and exc.name == "numpy._core._multiarray_umath": + import sys + candidates = [] + for path in __path__: + candidates.extend(f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + if len(candidates) == 0: + bad_c_module_info = ( + "We found no compiled module, was NumPy build successfully?\n") + else: + candidate_str = '\n * '.join(candidates) + # cache_tag is documented to be possibly None, so just use name if it is + # this guesses at cache_tag being the same as the extension module scheme + tag = sys.implementation.cache_tag or sys.implementation.name + bad_c_module_info = ( + f"The following compiled module files exist, but seem incompatible\n" + f"with with either python '{tag}' or the " + f"platform '{sys.platform}':\n\n * {candidate_str}\n" + ) + else: + bad_c_module_info = "" + + major, minor, *_ = sys.version_info + msg = f""" IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! Importing the numpy C-extensions failed. This error can happen for many reasons, often due to issues with your setup or how NumPy was installed. - +{bad_c_module_info} We have compiled some common reasons and troubleshooting tips at: https://numpy.org/devdocs/user/troubleshooting-importerror.html Please note and check the following: - * The Python version is: Python%d.%d from "%s" - * The NumPy version is: "%s" + * The Python version is: Python {major}.{minor} from "{sys.executable}" + * The NumPy version is: "{__version__}" and make sure that they are the versions you expect. -Please carefully study the documentation linked above for further help. -Original error was: %s -""" % (sys.version_info[0], sys.version_info[1], sys.executable, - __version__, exc) +Please carefully study the information and documentation linked above. +This is unlikely to be a NumPy issue but will be caused by a bad install +or environment on your machine. 
+ +Original error was: {exc} +""" + raise ImportError(msg) from exc finally: for envkey in env_added: From 552ce2a6ddba62c874629d7d5c2dfb21af403d50 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:15:36 +0200 Subject: [PATCH 020/294] appease linter --- numpy/_core/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index f19557277756..268932649eac 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -24,11 +24,15 @@ import sys # Basically always, the problem should be that the C module is wrong/missing... - if isinstance(exc, ModuleNotFoundError) and exc.name == "numpy._core._multiarray_umath": + if ( + isinstance(exc, ModuleNotFoundError) + and exc.name == "numpy._core._multiarray_umath" + ): import sys candidates = [] for path in __path__: - candidates.extend(f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + candidates.extend( + f for f in os.listdir(path) if f.startswith("_multiarray_umath")) if len(candidates) == 0: bad_c_module_info = ( "We found no compiled module, was NumPy build successfully?\n") From d63cc918f588d6eb92fd20b22dc509e345a728d8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 22 May 2025 12:33:27 +0200 Subject: [PATCH 021/294] Revert enable/CIBW_ENABLE changes [wheel build] --- .github/workflows/wheels.yml | 2 -- pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index bede99dd6e9a..fa2c1cb5ae97 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -180,8 +180,6 @@ jobs: uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - # delete when we switch 3.14(t)-dev to 3.14(t) - CIBW_ENABLE: cpython-prerelease - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: diff 
--git a/pyproject.toml b/pyproject.toml index 2d9fbb1e2675..5cf75b20a6b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,7 +148,7 @@ before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" -enable = ["cpython-freethreading", "pypy"] +enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" From dd95e3ec53570fea721d6ff82537d7e657792787 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:34:46 +0200 Subject: [PATCH 022/294] explicitly re-raise again (worked before because the next line raises...) --- numpy/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/__init__.py b/numpy/__init__.py index 3a67bd221247..aadc1fab3407 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -117,6 +117,7 @@ its source directory; please exit the numpy source tree, and relaunch your python interpreter from there.""" raise ImportError(msg) from e + raise from . 
import _core from ._core import ( From ae01519952d0cf4bd8ace320453340a3e8a42ee7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 13:50:56 +0200 Subject: [PATCH 023/294] Update numpy/_core/__init__.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Melissa Weber Mendonça --- numpy/_core/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 268932649eac..7b19cefb2f93 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -35,7 +35,7 @@ f for f in os.listdir(path) if f.startswith("_multiarray_umath")) if len(candidates) == 0: bad_c_module_info = ( - "We found no compiled module, was NumPy build successfully?\n") + "We found no compiled module, did NumPy build successfully?\n") else: candidate_str = '\n * '.join(candidates) # cache_tag is documented to be possibly None, so just use name if it is From 3cbb8cd321f7676cf3ba387e081c42a5ff08637b Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:00:09 -0400 Subject: [PATCH 024/294] BUG: handle the case of modules with derived types --- numpy/f2py/auxfuncs.py | 16 +++++++++++++++- numpy/f2py/auxfuncs.pyi | 2 ++ numpy/f2py/f90mod_rules.py | 4 ++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 6e58e6352224..a5af31d976ec 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -42,7 +42,7 @@ 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', - 'process_f2cmap_dict', 'containscommon' + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' ] @@ -569,6 +569,20 @@ def containscommon(rout): return 0 +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def 
containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + def containsmodule(block): if ismodule(block): return 1 diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 2a0d4e106bcc..1212f229c660 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -200,11 +200,13 @@ def isintent_inplace(var: _Var) -> bool: ... def isintent_aux(var: _Var) -> bool: ... # +def containsderivedtypes(rout: _ROut) -> _Bool: ... def containscommon(rout: _ROut) -> _Bool: ... def hasexternals(rout: _ROut) -> bool: ... def hasresultnote(rout: _ROut) -> _Bool: ... def hasbody(rout: _ROut) -> _Bool: ... def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... def hascallstatement(rout: _ROut) -> bool: ... def isroutine(rout: _ROut) -> bool: ... def ismodule(rout: _ROut) -> bool: ... diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 29adbe78a26f..d13a42a9d71f 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -120,6 +120,10 @@ def dadd(line, s=doc): if m['name'] in usenames and containscommon(m): outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue if onlyvars: outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") chooks = [''] From f84c105194013644a6f2e305a3ac6a6d3c946eb3 Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:01:41 -0400 Subject: [PATCH 025/294] TST: tests for modules with derived types --- .../src/regression/mod_derived_types.f90 | 23 +++++++++++++++++++ numpy/f2py/tests/test_regression.py | 10 ++++++++ 2 files changed, 33 insertions(+) create mode 100644 numpy/f2py/tests/src/regression/mod_derived_types.f90 
diff --git a/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 000000000000..7692c82cf42e --- /dev/null +++ b/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 1931ad21a48b..5e3bac38b287 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -37,6 +37,16 @@ def test_mdat(self): assert self.module.simple_subroutine(5) == 1014 +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] From ac9a024a1b469ccf5a684b74048ea76b0e0d7434 Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:21:36 -0400 Subject: [PATCH 026/294] fix W291 --- numpy/f2py/tests/test_regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/tests/test_regression.py 
b/numpy/f2py/tests/test_regression.py index 5e3bac38b287..93eb29e8e723 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -44,7 +44,7 @@ class TestModuleWithDerivedType(util.F2PyTest): @pytest.mark.slow def test_mtypes(self): assert self.module.no_type_subroutine(10) == 110 - assert self.module.type_subroutine(10) == 210 + assert self.module.type_subroutine(10) == 210 class TestNegativeBounds(util.F2PyTest): From cbd2fef887aa32af84bc6efb3653370560e4bcec Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 17:17:57 +0200 Subject: [PATCH 027/294] BUG: Fix cache use regression gh-29006 got the branching wrong leaving the cache undefined on most GCC/clang, which means we wouldn't use it. Also move it up so that we can just remove the unused globals entirely. --- numpy/_core/src/multiarray/alloc.c | 35 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 280ca81a35a7..64c2b854167d 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -27,9 +27,22 @@ #endif #endif -#define NBUCKETS 1024 /* number of buckets for data*/ -#define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ -#define NCACHE 7 /* number of cache entries per bucket */ +/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN + * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN + * use-of-uninitialized-memory warnings less useful. 
*/ +#define USE_ALLOC_CACHE 1 +#ifdef Py_GIL_DISABLED +# define USE_ALLOC_CACHE 0 +#elif defined(__has_feature) +# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) +# define USE_ALLOC_CACHE 0 +# endif +#endif + +#if USE_ALLOC_CACHE +# define NBUCKETS 1024 /* number of buckets for data*/ +# define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ +# define NCACHE 7 /* number of cache entries per bucket */ /* this structure fits neatly into a cacheline */ typedef struct { npy_uintp available; /* number of cached pointers */ @@ -37,7 +50,7 @@ typedef struct { } cache_bucket; static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; - +#endif /* USE_ALLOC_CACHE */ /* * This function tells whether NumPy attempts to call `madvise` with @@ -99,20 +112,6 @@ indicate_hugepages(void *p, size_t size) { } -/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN - * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN - * use-of-uninitialized-memory warnings less useful. 
*/ -#ifdef Py_GIL_DISABLED -#define USE_ALLOC_CACHE 0 -#elif defined(__has_feature) -# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) -# define USE_ALLOC_CACHE 0 -# endif -#else -#define USE_ALLOC_CACHE 1 -#endif - - /* as the cache is managed in global variables verify the GIL is held */ /* From e57bb2b429fab1b8af27a85182d1ebebf3ea80aa Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 17:31:42 +0200 Subject: [PATCH 028/294] ok, don't attempt to remove the statics (too ingrained) --- numpy/_core/src/multiarray/alloc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 64c2b854167d..cc9c5762a196 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -39,7 +39,6 @@ # endif #endif -#if USE_ALLOC_CACHE # define NBUCKETS 1024 /* number of buckets for data*/ # define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ # define NCACHE 7 /* number of cache entries per bucket */ @@ -50,7 +49,6 @@ typedef struct { } cache_bucket; static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; -#endif /* USE_ALLOC_CACHE */ /* * This function tells whether NumPy attempts to call `madvise` with From 06f4d5ed1b854869d4e9b1a3df7e9a7b9e60e77d Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Fri, 23 May 2025 11:48:01 -0400 Subject: [PATCH 029/294] ENH: add __array_function__ protocol in polynomial (#28996) --- numpy/polynomial/polynomial.py | 9 ++++++++- numpy/polynomial/tests/test_polynomial.py | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 32b53b757a1c..6ec0dc58a1de 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -82,6 +82,7 @@ import numpy as np import numpy.linalg as la +from numpy._core.overrides import 
array_function_dispatch from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu @@ -841,7 +842,13 @@ def polyvalfromroots(x, r, tensor=True): raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) +def _polyval2d_dispatcher(x, y, c): + return (x, y, c) +def _polygrid2d_dispatcher(x, y, c): + return (x, y, c) + +@array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -893,7 +900,7 @@ def polyval2d(x, y, c): """ return pu._valnd(polyval, c, x, y) - +@array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 27513fd682e8..8bfa3c184cf7 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -667,3 +667,25 @@ def test_result_type(self): arr = np.polydiv(1, np.float32(1)) assert_equal(arr[0].dtype, np.float64) + +class ArrayFunctionInterceptor: + def __init__(self): + self.called = False + + def __array_function__(self, func, types, args, kwargs): + self.called = True + return "intercepted" + +def test_polyval2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polyval2d(x, y, c) + assert result == "intercepted" + +def test_polygrid2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polygrid2d(x, y, c) + assert result == "intercepted" From 4c1eb7076a7623937097544cdd6a4ae5fc1aee6d Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 25 May 2025 03:23:48 +0200 Subject: [PATCH 030/294] TYP: annotate strings.slice --- numpy/_core/strings.pyi | 15 +++++++++++++++ numpy/strings/__init__.pyi | 2 ++ 
numpy/typing/tests/data/reveal/strings.pyi | 4 ++++ 3 files changed, 21 insertions(+) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 52d244b36ccd..b187ce71d25c 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -54,6 +54,7 @@ __all__ = [ "translate", "upper", "zfill", + "slice", ] _StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] @@ -494,3 +495,17 @@ def translate( table: str, deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.str_]: ... # type: ignore[overload-overlap] +@overload +def slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeOrUnicodeArray: ... 
diff --git a/numpy/strings/__init__.pyi b/numpy/strings/__init__.pyi index 2f924dca0b5d..b2fb363531d4 100644 --- a/numpy/strings/__init__.pyi +++ b/numpy/strings/__init__.pyi @@ -36,6 +36,7 @@ from numpy._core.strings import ( rjust, rpartition, rstrip, + slice, startswith, str_len, strip, @@ -92,4 +93,5 @@ __all__ = [ "decode", "encode", "translate", + "slice", ] diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 746e804ce577..18bd252d5ff9 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -190,3 +190,7 @@ assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) From aaae32e45830a9b3d9c026d5e2d79390ccda50e6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 25 May 2025 18:48:58 +0200 Subject: [PATCH 031/294] TYP: remove expired ``tostring`` methods --- numpy/lib/_user_array_impl.pyi | 4 +--- numpy/ma/core.pyi | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index c1c72b2320f1..13c0a0163421 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -2,7 +2,7 @@ from types import EllipsisType from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete -from typing_extensions import TypeVar, deprecated, override +from typing_extensions import TypeVar, override import numpy as np import numpy.typing as npt @@ -220,8 +220,6 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # def copy(self, /) -> Self: ... 
- @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /) -> bytes: ... def tobytes(self, /) -> bytes: ... def byteswap(self, /) -> Self: ... def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index f457f18d57bd..7e87611037b9 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -5,7 +5,7 @@ from collections.abc import Sequence from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete -from typing_extensions import TypeIs, TypeVar, deprecated +from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( @@ -1057,8 +1057,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def toflex(self) -> Incomplete: ... def torecords(self) -> Incomplete: ... def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... - @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... 
From 8ba2c63d1f30fc2aece18fcad78597d2a8fa94b6 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 25 May 2025 21:11:10 +0300 Subject: [PATCH 032/294] use pypy 3.11 nightly which has a fix for ctypeslib [skip azp][skip circleci] --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index dc9ef34db71d..742ca5c34144 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -82,7 +82,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: 'pypy3.11-v7.3.19' + python-version: 'pypy3.11-nightly' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt From 3d4629c9de79fe95d7f3d0724693c875d91d48f8 Mon Sep 17 00:00:00 2001 From: Zebreus Date: Mon, 26 May 2025 09:24:10 +0200 Subject: [PATCH 033/294] BLD: allow targeting webassembly without emscripten --- numpy/_core/include/numpy/npy_cpu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 4fb3fb406869..72f7331a0267 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -111,8 +111,9 @@ #endif #elif defined(__loongarch_lp64) #define NPY_CPU_LOONGARCH64 -#elif defined(__EMSCRIPTEN__) +#elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + /* __wasm__ is defined by clang when targeting wasm */ #define NPY_CPU_WASM #else #error Unknown CPU, please report this to numpy maintainers with \ From 8389ff9d1e6cbe7e922592e2f3d27913cb2e2e99 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:19:08 +0200 Subject: [PATCH 034/294] TYP: fix invalid overload definition in ``_core.defchararray.add`` --- numpy/_core/defchararray.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 776962c53998..43005745bfab 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -555,7 +555,7 @@ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ... +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... From 4cce8e4dbdf58ed8851d0ddbdf37bca14fe3d9f7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:27:15 +0200 Subject: [PATCH 035/294] TYP: annotate the ``*args`` and ``**kwargs`` of the ``ufunc`` methods --- numpy/_typing/_ufunc.pyi | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 766cde1ad420..104307da89db 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -146,10 +146,10 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, ) -> None: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @@ -382,11 +382,11 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... 
- def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @@ -439,11 +439,11 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _4Tuple[str | None] = ..., ) -> _2Tuple[NDArray[Any]]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @@ -494,11 +494,11 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] axes: list[_2Tuple[SupportsIndex]] = ..., ) -> NDArray[Any]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... 
- def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): From 466f64f961b9bc0845a7d4e69e9186c13b2012c7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:27:54 +0200 Subject: [PATCH 036/294] TYP: annotate the return type of ``numpy.typing.__getattr__`` --- numpy/typing/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 173c094b40aa..163655bd7662 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -169,7 +169,7 @@ def __dir__() -> list[str]: return __DIR -def __getattr__(name: str): +def __getattr__(name: str) -> object: if name == "NBitBase": import warnings From 1707fa13355d32b487b1f628a8da9925be136581 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:29:09 +0200 Subject: [PATCH 037/294] TYP: annotate ``numpy.lib._format_impl`` --- numpy/lib/_format_impl.pyi | 73 +++++++++++++++++++++++++++----------- 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index f4898d9aefa4..870c2d761bb0 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,26 +1,57 @@ -from typing import Final, Literal +import os +from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard -from numpy.lib._utils_impl import drop_metadata # noqa: F401 +from _typeshed import SupportsRead, SupportsWrite + +import numpy as np +import numpy.typing as npt +from numpy.lib._utils_impl import drop_metadata as drop_metadata 
__all__: list[str] = [] -EXPECTED_KEYS: Final[set[str]] -MAGIC_PREFIX: Final[bytes] -MAGIC_LEN: Literal[8] -ARRAY_ALIGN: Literal[64] -BUFFER_SIZE: Literal[262144] # 2**18 -GROWTH_AXIS_MAX_DIGITS: Literal[21] +_DTypeDescr: TypeAlias = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] + +### + +EXPECTED_KEYS: Final[set[str]] = ... +MAGIC_PREFIX: Final = b"\x93NUMPY" +MAGIC_LEN: Final = 8 +ARRAY_ALIGN: Final = 64 +BUFFER_SIZE: Final = 262_144 # 1 << 18 +GROWTH_AXIS_MAX_DIGITS: Final = 21 +_MAX_HEADER_SIZE: Final = 10_000 -def magic(major, minor): ... -def read_magic(fp): ... -def dtype_to_descr(dtype): ... -def descr_to_dtype(descr): ... -def header_data_from_array_1_0(array): ... -def write_array_header_1_0(fp, d): ... -def write_array_header_2_0(fp, d): ... -def read_array_header_1_0(fp): ... -def read_array_header_2_0(fp): ... -def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... -def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... -def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... -def isfileobj(f): ... +def magic(major: int, minor: int) -> bytes: ... +def read_magic(fp: SupportsRead[bytes]) -> tuple[int, int]: ... +def dtype_to_descr(dtype: np.dtype) -> _DTypeDescr: ... +def descr_to_dtype(descr: _DTypeDescr) -> np.dtype: ... +def header_data_from_array_1_0(array: np.ndarray) -> dict[str, Any]: ... +def write_array_header_1_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def write_array_header_2_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def read_array_header_1_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def read_array_header_2_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... 
+def write_array( + fp: SupportsWrite[bytes], + array: np.ndarray, + version: tuple[int, int] | None = None, + allow_pickle: bool = True, + pickle_kwargs: dict[str, Any] | None = None, +) -> None: ... +def read_array( + fp: SupportsRead[bytes], + allow_pickle: bool = False, + pickle_kwargs: dict[str, Any] | None = None, + *, + max_header_size: int = 10_000, +) -> np.ndarray: ... +def open_memmap( + filename: str | os.PathLike[Any], + mode: str = "r+", + dtype: npt.DTypeLike | None = None, + shape: tuple[int, ...] | None = None, + fortran_order: bool = False, + version: tuple[int, int] | None = None, + *, + max_header_size: int = 10_000, +) -> np.memmap: ... +def isfileobj(f: object) -> TypeGuard[BinaryIO]: ... # don't use `typing.TypeIs` From aaae32e45830a9b3d9c026d5e2d79390ccda50e6 Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 27 May 2025 07:02:02 +0300 Subject: [PATCH 038/294] BLD: use sonoma image on Cirrus for [wheel build][skip actions][skip azp] --- tools/ci/cirrus_wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index b531e953daee..6d02411df2e9 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -11,7 +11,7 @@ macosx_arm64_task: CIRRUS_CLONE_SUBMODULES: true macos_instance: matrix: - image: ghcr.io/cirruslabs/macos-monterey-xcode + image: ghcr.io/cirruslabs/macos-runner:sonoma matrix: - env: From fba2e60fb95891e8e044113afe0a321405f581ea Mon Sep 17 00:00:00 2001 From: Hamza Meel Date: Tue, 27 May 2025 11:07:56 +0200 Subject: [PATCH 039/294] DOC: fix typo in documentation of vecmat The body of the summary uses the symbol v to reference the `x1` parameter, however in the displayed math, b is used instead. This commit changes b to v in the displayed math for consistency. 
--- numpy/_core/code_generators/ufunc_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 5d3ba73c92f0..aa9fe4acea05 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3030,7 +3030,7 @@ def add_newdoc(place, name, doc): vector-matrix product is defined as: .. math:: - \\mathbf{b} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} + \\mathbf{v} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} where the sum is over the last dimension of ``x1`` and the one-but-last dimensions in ``x2`` (unless `axes` is specified) and where From 39a422230997bfb282503b74b39a26bc75be9a85 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 11:30:44 +0200 Subject: [PATCH 040/294] MAINT: Enforce ruff E501 --- benchmarks/benchmarks/bench_core.py | 3 ++- benchmarks/benchmarks/bench_function_base.py | 3 ++- benchmarks/benchmarks/bench_ufunc_strides.py | 5 +++-- numpy/_typing/_dtype_like.py | 2 +- numpy/tests/test_public_api.py | 2 +- ruff.toml | 9 ++++----- 6 files changed, 13 insertions(+), 11 deletions(-) diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 5e174704f105..a9a6c88b87a0 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -151,7 +151,8 @@ class CountNonzero(Benchmark): params = [ [1, 2, 3], [100, 10000, 1000000], - [bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, str, object] + [bool, np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, str, object] ] def setup(self, numaxes, size, dtype): diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 57499dc761f8..9b770aeb60bf 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ 
b/benchmarks/benchmarks/bench_function_base.py @@ -241,7 +241,8 @@ class Sort(Benchmark): def setup(self, kind, dtype, array_type): rnd = np.random.RandomState(507582308) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) + generate_array_method = getattr(SortGenerator, array_class) + self.arr = generate_array_method(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_sort(self, kind, dtype, array_type): # Using np.sort(...) instead of arr.sort(...) because it makes a copy. diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index b86be87f9e68..95df16e2cb5e 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -208,8 +208,9 @@ def train(self, max_epoch): for epoch in range(max_epoch): z = np.matmul(self.X_train, self.W) A = 1 / (1 + np.exp(-z)) # sigmoid(z) - loss = -np.mean(self.Y_train * np.log(A) + (1 - self.Y_train) * np.log(1 - A)) - dz = A - self.Y_train + Y_train = self.Y_train + loss = -np.mean(Y_train * np.log(A) + (1 - Y_train) * np.log(1 - A)) + dz = A - Y_train dw = (1 / self.size) * np.matmul(self.X_train.T, dz) self.W = self.W - self.alpha * dw diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index c406b3098384..d341db5dc23a 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -55,7 +55,7 @@ def dtype(self) -> _DTypeT_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # noqa: E501 # Would create a dtype[np.void] diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index a56cd13296e3..6a36358c3a06 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -780,7 +780,7 @@ def test___qualname___and___module___attribute(): inspect.ismodule(member) and # it's a module "numpy" in member.__name__ and # inside NumPy not member_name.startswith("_") and # not private - member_name not in {"tests", "typing"} and # 2024-12: type names don't match + member_name not in {"tests", "typing"} and # type names don't match "numpy._core" not in member.__name__ and # outside _core member not in visited_modules # not visited yet ): diff --git a/ruff.toml b/ruff.toml index 6b05d8de69ee..c0008da43226 100644 --- a/ruff.toml +++ b/ruff.toml @@ -68,16 +68,15 @@ ignore = [ "_tempita.py" = ["B909"] "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"benchmarks/*py" = ["E501"] +"benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/**" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/_typing/*py" = ["E501"] +"numpy/_typing/_array_like.py" = ["E501"] "numpy/lib/tests/*py" = ["E501"] -"numpy/linalg/tests/*py" = ["E501"] -"numpy/ma/tests/*py" = ["E501"] -"numpy/tests/*py" = ["E501"] +"numpy/linalg/tests/test_linalg.py" = ["E501"] +"numpy/tests/test_configtool.py" = ["E501"] "numpy*pyi" = ["E501"] "numpy/f2py/*py" = ["E501"] "__init__.py" = ["F401", "F403", "F405"] From f8b37c123485032260319f8f79ede1d6ed7e7694 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 12:13:12 +0200 Subject: [PATCH 041/294] enforce more files --- 
numpy/lib/tests/test_format.py | 3 --- numpy/lib/tests/test_histograms.py | 8 ++++---- numpy/lib/tests/test_recfunctions.py | 5 ++--- ruff.toml | 5 ++++- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index d805d3493ca4..2ab7026ccc7c 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -384,9 +384,6 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index b7752d1a8f1e..be5268d9813a 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -554,7 +554,7 @@ def test_outlier(self): assert_equal(len(a), numbins) def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" + """Verify that Scott's rule and Stone's rule converges for normally distributed data""" # noqa: E501 def nbins_ratio(seed, size): rng = np.random.RandomState(seed) @@ -562,10 +562,10 @@ def nbins_ratio(seed, size): a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) return a / (a + b) - ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] - for seed in range(10)] + geom_space = np.geomspace(start=10, stop=100, num=4).round().astype(int) + ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] - # the average difference between the two methods decreases as the dataset size increases. + # the average difference between the two methods decreases as the dataset size increases. 
# noqa: E501 avg = abs(np.mean(ll, axis=0) - 0.5) assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index eee1f47f834f..72377b8f7c35 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -524,9 +524,8 @@ def test_flatten_wflexible(self): assert_equal(test, control) test = merge_arrays((x, w), flatten=False) - controldtype = [('f0', int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int), ('bc', [])])])] + f1_descr = [('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])] + controldtype = [('f0', int), ('f1', f1_descr)] control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], dtype=controldtype) assert_equal(test, control) diff --git a/ruff.toml b/ruff.toml index c0008da43226..7ea90ee57b69 100644 --- a/ruff.toml +++ b/ruff.toml @@ -74,7 +74,10 @@ ignore = [ "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/_typing/_array_like.py" = ["E501"] -"numpy/lib/tests/*py" = ["E501"] +"numpy/lib/tests/test_function_base.py" = ["E501"] +"numpy/lib/tests/test_format.py" = ["E501"] +"numpy/lib/tests/test_io.py" = ["E501"] +"numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] "numpy*pyi" = ["E501"] From 9ea773dec671da1c08068250d64813098d55bb3b Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 12:24:05 +0200 Subject: [PATCH 042/294] more enforcement --- numpy/_core/tests/test_cython.py | 3 ++- numpy/_core/tests/test_function_base.py | 3 ++- numpy/_core/tests/test_half.py | 2 +- numpy/_core/tests/test_indexing.py | 2 +- numpy/_core/tests/test_mem_overlap.py | 4 ++-- numpy/_core/tests/test_scalarbuffer.py | 4 ++-- numpy/_core/tests/test_unicode.py | 3 ++- ruff.toml | 19 ++++++++++++++++++- 8 files changed, 30 insertions(+), 10 deletions(-) diff 
--git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index fb3839fd2685..2c7b40c5614c 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -345,7 +345,8 @@ def test_npystring_allocators_other_dtype(install_temp): assert checks.npystring_allocators_other_types(arr1, arr2) == 0 -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64') def test_npy_uintp_type_enum(): import checks assert checks.check_npy_uintp_type_enum() diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index c925cf1f77e5..3a8552de2d36 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -32,7 +32,8 @@ def _is_armhf(): # Check if the current platform is ARMHF (32-bit ARM architecture) - return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' + architecture = platform.architecture() + return platform.machine().startswith('arm') and architecture[0] == '32bit' class PhysicalQuantity(float): def __new__(cls, value): diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index 68f17b2a5e14..e2d6e6796db4 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -531,7 +531,7 @@ def test_half_fpe(self): assert_raises_fpe('overflow', lambda a, b: a - b, float16(-65504), float16(17)) assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501 assert_raises_fpe('overflow', np.spacing, float16(65504)) # Invalid value errors diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index e722d0c1a9df..1d42cde48682 100644 --- 
a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1352,7 +1352,7 @@ def test_boolean_indexing_fast_path(self): "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together + # This used to incorrectly give a ValueError: operands could not be broadcast together # noqa: E501 idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index d1735670ad6b..b437a7e14298 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -166,7 +166,7 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: err_msg = " " + "\n ".join([ - f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", + f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", # noqa: E501 f"shape_a = {a.shape!r}", f"shape_b = {b.shape!r}", f"strides_a = {a.strides!r}", @@ -402,7 +402,7 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) + assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) # noqa: E501 assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u // 2 for x, u in zip(X, U))) diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index 4d6b5bdd73fc..c957aec4f9b2 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -128,8 +128,8 @@ def test_str_ucs4(self, s): s = np.str_(s) # only our subclass implements the buffer protocol # all the same, characters always encode as ucs4 - expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), 
'format': '2w', - 'readonly': True} + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), + 'format': '2w', 'readonly': True} v = memoryview(s) assert self._as_dict(v) == expected diff --git a/numpy/_core/tests/test_unicode.py b/numpy/_core/tests/test_unicode.py index 9fdc55b0e322..6a86503a35ae 100644 --- a/numpy/_core/tests/test_unicode.py +++ b/numpy/_core/tests/test_unicode.py @@ -134,7 +134,8 @@ def test_valuesSD(self): def test_valuesMD(self): # Check creation of multi-dimensional objects with values - ua = np.array([[[self.ucs_value * self.ulen] * 2] * 3] * 4, dtype=f'U{self.ulen}') + data = [[[self.ucs_value * self.ulen] * 2] * 3] * 4 + ua = np.array(data, dtype=f'U{self.ulen}') self.content_check(ua, ua[0, 0, 0], 4 * self.ulen * 2 * 3 * 4) self.content_check(ua, ua[-1, -1, -1], 4 * self.ulen * 2 * 3 * 4) diff --git a/ruff.toml b/ruff.toml index 7ea90ee57b69..b4190fe41787 100644 --- a/ruff.toml +++ b/ruff.toml @@ -69,7 +69,24 @@ ignore = [ "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] "benchmarks/benchmarks/bench_linalg.py" = ["E501"] -"numpy/_core/tests/**" = ["E501"] +"numpy/_core/tests/test_api.py" = ["E501"] +"numpy/_core/tests/test_arrayprint.py" = ["E501"] +"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] +"numpy/_core/tests/test_cpu_features.py" = ["E501"] +"numpy/_core/tests/test_datetime.py" = ["E501"] +"numpy/_core/tests/test_dtype.py" = ["E501"] +"numpy/_core/tests/test_defchararray.py" = ["E501"] +"numpy/_core/tests/test_einsum.py" = ["E501"] +"numpy/_core/tests/test_multiarray.py" = ["E501"] +"numpy/_core/tests/test_multithreading.py" = ["E501"] +"numpy/_core/tests/test_nditer*py" = ["E501"] +"numpy/_core/tests/test_ufunc*py" = ["E501"] +"numpy/_core/tests/test_umath*py" = ["E501"] +"numpy/_core/tests/test_numeric*.py" = ["E501"] +"numpy/_core/tests/test_regression.py" = ["E501"] +"numpy/_core/tests/test_shape_base.py" = ["E501"] +"numpy/_core/tests/test_simd*.py" = ["E501"] 
+"numpy/_core/tests/test_strings.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] From f84d9645584d85be1bdd688927f53792b424959d Mon Sep 17 00:00:00 2001 From: Hamza Meel Date: Tue, 27 May 2025 12:47:50 +0200 Subject: [PATCH 043/294] DOC: fix typo in documentation of matvec Same as for commit fba2e60fb9, the vector was referred to as v in the body of the summary but b in the displayed math. This commit fixes the inconsistency. --- numpy/_core/code_generators/ufunc_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index aa9fe4acea05..ddae87bd6012 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -2963,7 +2963,7 @@ def add_newdoc(place, name, doc): matrix-vector product is defined as: .. math:: - \\mathbf{A} \\cdot \\mathbf{b} = \\sum_{j=0}^{n-1} A_{ij} v_j + \\mathbf{A} \\cdot \\mathbf{v} = \\sum_{j=0}^{n-1} A_{ij} v_j where the sum is over the last dimensions in ``x1`` and ``x2`` (unless ``axes`` is specified). 
(For a matrix-vector product with the From 7eacb9efdb67733081eb3308cbe149f24800f3dc Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Tue, 27 May 2025 16:38:13 +0100 Subject: [PATCH 044/294] Merge pull request #29063 from MarcoGorelli/align-maskedarray-with-ndarray --- numpy/typing/tests/data/fail/ma.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 41306b23fe78..5dc6706ebf81 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -2,10 +2,10 @@ from typing import TypeAlias, TypeVar import numpy as np import numpy.typing as npt -from numpy._typing import _Shape +from numpy._typing import _AnyShape _ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] MAR_b: MaskedArray[np.bool] From ba27d95903ba9d3d201c051ce9c534380b5b3f4d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 27 May 2025 12:16:06 -0600 Subject: [PATCH 045/294] BUG: add bounds-checking to in-place string multiply (#29060) * BUG: add bounds-checking to in-place string multiply * MNT: check for overflow and raise OverflowError * MNT: respond to review suggestion * MNT: handle overflow in one more spot * MNT: make test behave the same on all architectures * MNT: reorder to avoid work in some cases --- doc/release/upcoming_changes/29060.change.rst | 3 ++ numpy/_core/src/umath/string_buffer.h | 12 +++++ numpy/_core/src/umath/string_ufuncs.cpp | 48 ++++++++++++++----- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 12 ++--- numpy/_core/strings.py | 2 +- numpy/_core/tests/test_stringdtype.py | 4 +- numpy/_core/tests/test_strings.py | 13 ++++- numpy/typing/tests/data/pass/ma.py | 3 +- 8 files changed, 74 insertions(+), 23 deletions(-) create mode 100644 
doc/release/upcoming_changes/29060.change.rst diff --git a/doc/release/upcoming_changes/29060.change.rst b/doc/release/upcoming_changes/29060.change.rst new file mode 100644 index 000000000000..1561da7bf94e --- /dev/null +++ b/doc/release/upcoming_changes/29060.change.rst @@ -0,0 +1,3 @@ +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 554f9ece5197..dafedcbc03ff 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -297,6 +297,18 @@ struct Buffer { return num_codepoints; } + inline size_t + buffer_width() + { + switch (enc) { + case ENCODING::ASCII: + case ENCODING::UTF8: + return after - buf; + case ENCODING::UTF32: + return (after - buf) / sizeof(npy_ucs4); + } + } + inline Buffer& operator+=(npy_int64 rhs) { diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index ffd757815364..95f30ccb109e 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -15,6 +15,7 @@ #include "dtypemeta.h" #include "convert_datatype.h" #include "gil_utils.h" +#include "templ_common.h" /* for npy_mul_size_with_overflow_size_t */ #include "string_ufuncs.h" #include "string_fastsearch.h" @@ -166,26 +167,44 @@ string_add(Buffer buf1, Buffer buf2, Buffer out) template -static inline void +static inline int string_multiply(Buffer buf1, npy_int64 reps, Buffer out) { size_t len1 = buf1.num_codepoints(); if (reps < 1 || len1 == 0) { out.buffer_fill_with_zeros_after_index(0); - return; + return 0; } if (len1 == 1) { out.buffer_memset(*buf1, reps); out.buffer_fill_with_zeros_after_index(reps); + return 0; } - else { - for (npy_int64 i = 0; i < reps; i++) { - buf1.buffer_memcpy(out, len1); - out 
+= len1; - } - out.buffer_fill_with_zeros_after_index(0); + + size_t newlen; + if (NPY_UNLIKELY(npy_mul_with_overflow_size_t(&newlen, reps, len1) != 0) || newlen > PY_SSIZE_T_MAX) { + return -1; + } + + size_t pad = 0; + size_t width = out.buffer_width(); + if (width < newlen) { + reps = width / len1; + pad = width % len1; } + + for (npy_int64 i = 0; i < reps; i++) { + buf1.buffer_memcpy(out, len1); + out += len1; + } + + buf1.buffer_memcpy(out, pad); + out += pad; + + out.buffer_fill_with_zeros_after_index(0); + + return 0; } @@ -238,7 +257,9 @@ string_multiply_strint_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in1, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in2, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in2, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -267,7 +288,9 @@ string_multiply_intstr_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in2, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in1, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in1, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -752,10 +775,11 @@ string_multiply_resolve_descriptors( if (given_descrs[2] == NULL) { PyErr_SetString( PyExc_TypeError, - "The 'out' kwarg is necessary. Use numpy.strings.multiply without it."); + "The 'out' kwarg is necessary when using the string multiply ufunc " + "directly. 
Use numpy.strings.multiply to multiply strings without " + "specifying 'out'."); return _NPY_ERROR_OCCURRED_IN_CAST; } - loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index adcbfd3b7480..b0181d4186c9 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -137,9 +137,9 @@ static int multiply_loop_core( size_t newsize; int overflowed = npy_mul_with_overflow_size_t( &newsize, cursize, factor); - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in string multiply"); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in string multiply"); goto fail; } @@ -1748,9 +1748,9 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, width - num_codepoints); newsize += s1.size; - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in %s", ufunc_name); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in %s", ufunc_name); goto fail; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index fc0a2d0b4d1a..b4dc1656024f 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -218,7 +218,7 @@ def multiply(a, i): # Ensure we can do a_len * i without overflow. 
if np.any(a_len > sys.maxsize / np.maximum(i, 1)): - raise MemoryError("repeated string is too long") + raise OverflowError("Overflow encountered in string multiply") buffersizes = a_len * i out_dtype = f"{a.dtype.char}{buffersizes.max()}" diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 1c15c4895eaf..9bab810d4421 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -128,8 +128,8 @@ def test_null_roundtripping(): def test_string_too_large_error(): arr = np.array(["a", "b", "c"], dtype=StringDType()) - with pytest.raises(MemoryError): - arr * (2**63 - 2) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) @pytest.mark.parametrize( diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index f6de208d7951..56e928df4d7b 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -224,9 +224,20 @@ def test_multiply_raises(self, dt): with pytest.raises(TypeError, match="unsupported type"): np.strings.multiply(np.array("abc", dtype=dt), 3.14) - with pytest.raises(MemoryError): + with pytest.raises(OverflowError): np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, np.int64, np.int_]) def test_multiply_integer_dtypes(self, i_dt, dt): diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index dc9474fe4069..e7915a583210 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -16,7 +16,8 @@ MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) MAR_S: 
MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) -MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], np.ma.MaskedArray(["a"], "T")) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) From 5a8775642acfa24907dd52684c4be26f1ad3e852 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 12:01:46 +0100 Subject: [PATCH 046/294] MAINT: Convert umath_linalg to multi-phase init (PEP 489) --- .../upcoming_changes/29030.compatibility.rst | 6 ++ numpy/linalg/umath_linalg.cpp | 71 ++++++++++++------- numpy/tests/test_lazyloading.py | 36 +++++----- numpy/tests/test_reloading.py | 35 +++++---- 4 files changed, 91 insertions(+), 57 deletions(-) create mode 100644 doc/release/upcoming_changes/29030.compatibility.rst diff --git a/doc/release/upcoming_changes/29030.compatibility.rst b/doc/release/upcoming_changes/29030.compatibility.rst new file mode 100644 index 000000000000..cf08551e28ee --- /dev/null +++ b/doc/release/upcoming_changes/29030.compatibility.rst @@ -0,0 +1,6 @@ +* NumPy's C extension modules have begun to use multi-phase initialisation, + as defined by :pep:`489`. As part of this, a new explicit check has been added + that each such module is only imported once per Python process. This comes with + the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing + it will now fail with an ``ImportError``. This has always been unsafe, with + unexpected side-effects, though did not previously raise an error. 
diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index ead6d84a73a2..1b6850145bc8 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -4688,57 +4688,54 @@ static PyMethodDef UMath_LinAlgMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - UMATH_LINALG_MODULE_NAME, - NULL, - -1, - UMath_LinAlgMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__umath_linalg(void) +static int +_umath_linalg_exec(PyObject *m) { - PyObject *m; PyObject *d; PyObject *version; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } d = PyModule_GetDict(m); if (d == NULL) { - return NULL; + return -1; } version = PyUnicode_FromString(umath_linalg_version_string); if (version == NULL) { - return NULL; + return -1; } int ret = PyDict_SetItemString(d, "__version__", version); Py_DECREF(version); if (ret < 0) { - return NULL; + return -1; } /* Load the ufunc operators into the module's namespace */ if (addUfuncs(d) < 0) { - return NULL; + return -1; } #if PY_VERSION_HEX < 0x30d00b3 && !HAVE_EXTERNAL_LAPACK lapack_lite_lock = PyThread_allocate_lock(); if (lapack_lite_lock == NULL) { PyErr_NoMemory(); - return NULL; + return -1; } #endif @@ -4748,10 +4745,30 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static 
struct PyModuleDef_Slot _umath_linalg_slots[] = { + {Py_mod_exec, (void*)_umath_linalg_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_umath_linalg", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + UMath_LinAlgMethods, /* m_methods */ + _umath_linalg_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__umath_linalg(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/tests/test_lazyloading.py b/numpy/tests/test_lazyloading.py index 5f6233f1c5cb..7b0324802611 100644 --- a/numpy/tests/test_lazyloading.py +++ b/numpy/tests/test_lazyloading.py @@ -1,23 +1,23 @@ +import subprocess import sys -from importlib.util import LazyLoader, find_spec, module_from_spec +import textwrap import pytest +from numpy.testing import IS_WASM -# Warning raised by _reload_guard() in numpy/__init__.py -@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded") + +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_lazy_load(): # gh-22045. lazyload doesn't import submodule names into the namespace - # muck with sys.modules to test the importing system - old_numpy = sys.modules.pop("numpy") - numpy_modules = {} - for mod_name, mod in list(sys.modules.items()): - if mod_name[:6] == "numpy.": - numpy_modules[mod_name] = mod - sys.modules.pop(mod_name) + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially, since we also reload the C-modules. 
+ code = textwrap.dedent(r""" + import sys + from importlib.util import LazyLoader, find_spec, module_from_spec - try: # create lazy load of numpy as np spec = find_spec("numpy") module = module_from_spec(spec) @@ -31,8 +31,12 @@ def test_lazy_load(): # test triggering the import of the package np.ndarray - - finally: - if old_numpy: - sys.modules["numpy"] = old_numpy - sys.modules.update(numpy_modules) + """) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index c21dc007b232..3e6ded326941 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -48,27 +48,34 @@ def test_novalue(): @pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_full_reimport(): - """At the time of writing this, it is *not* truly supported, but - apparently enough users rely on it, for it to be an annoying change - when it started failing previously. - """ + # Reimporting numpy like this is not safe due to use of global C state, + # and has unexpected side effects. Test that an ImportError is raised. + # When all extension modules are isolated, this should test that clearing + # sys.modules and reimporting numpy works without error. + # Test within a new process, to ensure that we do not mess with the # global state during the test run (could lead to cryptic test failures). # This is generally unsafe, especially, since we also reload the C-modules. 
code = textwrap.dedent(r""" import sys - from pytest import warns import numpy as np - for k in list(sys.modules.keys()): - if "numpy" in k: - del sys.modules[k] + for k in [k for k in sys.modules if k.startswith('numpy')]: + del sys.modules[k] - with warns(UserWarning): + try: import numpy as np + except ImportError as err: + if str(err) != "cannot load module more than once per process": + raise SystemExit(f"Unexpected ImportError: {err}") + else: + raise SystemExit("DID NOT RAISE ImportError") """) - p = subprocess.run([sys.executable, '-c', code], capture_output=True) - if p.returncode: - raise AssertionError( - f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}" - ) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout From 7ae80cd29f1ff734d7f42038ca89cb27d6a81a62 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 14:42:26 +0100 Subject: [PATCH 047/294] MAINT: Convert dummymodule to multi-phase init (PEP 489) --- numpy/_core/src/dummymodule.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/dummymodule.c b/numpy/_core/src/dummymodule.c index 2f293d6c4cd6..e1ef80ab3af3 100644 --- a/numpy/_core/src/dummymodule.c +++ b/numpy/_core/src/dummymodule.c @@ -14,25 +14,27 @@ static struct PyMethodDef methods[] = { {NULL, NULL, 0, NULL} }; +static struct PyModuleDef_Slot dummy_slots[] = { +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + // signal that this module can be imported in isolated subinterpreters + {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; static struct PyModuleDef moduledef = { - 
PyModuleDef_HEAD_INIT, - "dummy", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "dummy", + .m_size = 0, + .m_methods = methods, + .m_slots = dummy_slots, }; /* Initialization function for the module */ PyMODINIT_FUNC PyInit__dummy(void) { - PyObject *m; - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; - } - return m; + return PyModuleDef_Init(&moduledef); } From 0145d7c379e245bfd259bf1c519441da4bea089b Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 14:47:45 +0100 Subject: [PATCH 048/294] MAINT: Convert lapack_lite to multi-phase init (PEP 489) --- numpy/linalg/lapack_litemodule.c | 62 ++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 23 deletions(-) diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index e5f3af05af22..cad5f3f92f09 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -377,28 +377,25 @@ static struct PyMethodDef lapack_lite_module_methods[] = { { NULL,NULL,0, NULL} }; +static int module_loaded = 0; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "lapack_lite", - NULL, - -1, - lapack_lite_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit_lapack_lite(void) +static int +lapack_lite_exec(PyObject *m) { - PyObject *m,*d; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + PyObject *d; + + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; + + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); + d = PyModule_GetDict(m); LapackError = PyErr_NewException("numpy.linalg.lapack_lite.LapackError", NULL, NULL); 
PyDict_SetItemString(d, "LapackError", LapackError); @@ -409,10 +406,29 @@ PyMODINIT_FUNC PyInit_lapack_lite(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot lapack_lite_slots[] = { + {Py_mod_exec, lapack_lite_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "lapack_lite", + .m_size = 0, + .m_methods = lapack_lite_module_methods, + .m_slots = lapack_lite_slots, +}; - return m; +PyMODINIT_FUNC PyInit_lapack_lite(void) { + return PyModuleDef_Init(&moduledef); } From 3363b38d99f87601869c2c9d68c2b16e54509675 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Wed, 28 May 2025 16:06:24 +0100 Subject: [PATCH 049/294] TYP: Type ``MaskedArray.__{add,radd,sub,rsub}__`` (#29012) --- numpy/__init__.pyi | 14 +- numpy/ma/core.pyi | 202 +++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 255 ++++++++++++++++++++++++++ 3 files changed, 462 insertions(+), 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0359e605a1c3..0e8b4625e7d4 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2809,6 +2809,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + # Keep in sync with `MaskedArray.__add__` @overload def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
@overload @@ -2856,6 +2857,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2903,6 +2905,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__sub__` @overload def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2940,6 +2943,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rsub__` @overload def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3359,6 +3363,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # object and its value is >= 0 # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't # work, as this will lead to `false negatives` when using these inplace ops. + # Keep in sync with `MaskedArray.__iadd__` @overload def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3386,7 +3391,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- # + # Keep in sync with `MaskedArray.__isub__` @overload def __isub__( self: NDArray[unsignedinteger], @@ -3404,7 +3409,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - # + # Keep in sync with `MaskedArray.__imul__` @overload def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3426,6 +3431,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `MaskedArray.__ipow__` @overload def __ipow__( self: NDArray[unsignedinteger], @@ -3441,7 +3447,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - # + # Keep in sync with `MaskedArray.__itruediv__` @overload def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3449,7 +3455,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- # keep in sync with `__imod__` + # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` @overload def __ifloordiv__( self: NDArray[unsignedinteger], diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 7e87611037b9..388619e1a654 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2,7 +2,7 @@ # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 from collections.abc import Sequence -from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload +from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete from typing_extensions import TypeIs, TypeVar @@ -19,17 +19,23 @@ from numpy import ( bool_, bytes_, character, + complex128, complexfloating, datetime64, dtype, dtypes, expand_dims, + float16, + float32, float64, floating, generic, + inexact, int_, + integer, intp, ndarray, + number, object_, signedinteger, str_, @@ -40,14 +46,21 @@ from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, NDArray, + _32Bit, + _64Bit, _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeBytes_co, + _ArrayLikeComplex128_co, _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, _ArrayLikeStr_co, _ArrayLikeString_co, _ArrayLikeTD64_co, @@ -247,8 +260,18 @@ _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) # A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] + +_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] +_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] +_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] + _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] MaskType = bool_ @@ -463,10 +486,179 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] - def __add__(self, other): ... - def __radd__(self, other): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... + + # Keep in sync with `ndarray.__add__` + @overload + def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
+ @overload + def __add__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __add__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... 
+ @overload + def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__radd__` + @overload # signature equivalent to __add__ + def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... 
# type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __radd__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__sub__` + @overload + def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... 
+ @overload + def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rsub__` + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __rsub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __mul__(self, other): ... def __rmul__(self, other): ... def __truediv__(self, other): ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index deda19c3d743..97f833b6a488 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -12,21 +12,35 @@ class MaskedArraySubclass(MaskedArray[np.complex128]): ... AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] +AR_u4: NDArray[np.uint32] AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] AR_o: NDArray[np.timedelta64] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_td64: list[np.timedelta64] +AR_LIKE_dt64: list[np.datetime64] +AR_LIKE_o: list[np.object_] +AR_number: NDArray[np.number] + MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] MAR_f8: MaskedArray[np.float64] MAR_i8: MaskedArray[np.int64] +MAR_u4: MaskedArray[np.uint32] MAR_dt64: MaskedArray[np.datetime64] MAR_td64: MaskedArray[np.timedelta64] MAR_o: MaskedArray[np.object_] MAR_s: MaskedArray[np.str_] MAR_byte: MaskedArray[np.bytes_] MAR_V: MaskedArray[np.void] +MAR_floating: MaskedArray[np.floating] +MAR_number: MaskedArray[np.number] MAR_subclass: MaskedArraySubclass @@ -368,3 +382,244 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) + +# Masked Array addition + +assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b + AR_LIKE_o, Any) + +assert_type(AR_LIKE_u + MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i + MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_b, 
MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_b, Any) + +assert_type(MAR_u4 + AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 + AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u + MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i + MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_u4, Any) + +assert_type(MAR_i8 + AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 + AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_i8, 
MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_i8, Any) + +assert_type(MAR_f8 + AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c + MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o + MAR_f8, Any) + +assert_type(MAR_c16 + AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o + MAR_c16, Any) + +assert_type(MAR_td64 + AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 + MAR_td64, 
MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_td64, Any) + +assert_type(MAR_dt64 + AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_o + MAR_dt64, Any) + +assert_type(MAR_o + AR_LIKE_b, Any) +assert_type(MAR_o + AR_LIKE_u, Any) +assert_type(MAR_o + AR_LIKE_i, Any) +assert_type(MAR_o + AR_LIKE_f, Any) +assert_type(MAR_o + AR_LIKE_c, Any) +assert_type(MAR_o + AR_LIKE_td64, Any) +assert_type(MAR_o + AR_LIKE_dt64, Any) +assert_type(MAR_o + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_o, Any) +assert_type(AR_LIKE_u + MAR_o, Any) +assert_type(AR_LIKE_i + MAR_o, Any) +assert_type(AR_LIKE_f + MAR_o, Any) +assert_type(AR_LIKE_c + MAR_o, Any) +assert_type(AR_LIKE_td64 + MAR_o, Any) +assert_type(AR_LIKE_dt64 + MAR_o, Any) +assert_type(AR_LIKE_o + MAR_o, Any) + +# Masked Array subtraction +# Keep in sync with numpy/typing/tests/data/reveal/arithmetic.pyi + +assert_type(MAR_number - AR_number, MaskedArray[np.number]) + +assert_type(MAR_b - AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b - AR_LIKE_o, Any) + +assert_type(AR_LIKE_u - MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i - MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_b, Any) + 
+assert_type(MAR_u4 - AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 - AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u - MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_u4, Any) + +assert_type(MAR_i8 - AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 - AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_i8, Any) + +assert_type(MAR_f8 - AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_f, 
MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c - MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o - MAR_f8, Any) + +assert_type(MAR_c16 - AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o - MAR_c16, Any) + +assert_type(MAR_td64 - AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_td64, Any) + +assert_type(MAR_dt64 - AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_i, 
MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_dt64, MaskedArray[np.timedelta64]) +assert_type(MAR_dt64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_dt64 - MAR_dt64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o - MAR_dt64, Any) + +assert_type(MAR_o - AR_LIKE_b, Any) +assert_type(MAR_o - AR_LIKE_u, Any) +assert_type(MAR_o - AR_LIKE_i, Any) +assert_type(MAR_o - AR_LIKE_f, Any) +assert_type(MAR_o - AR_LIKE_c, Any) +assert_type(MAR_o - AR_LIKE_td64, Any) +assert_type(MAR_o - AR_LIKE_dt64, Any) +assert_type(MAR_o - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_o, Any) +assert_type(AR_LIKE_u - MAR_o, Any) +assert_type(AR_LIKE_i - MAR_o, Any) +assert_type(AR_LIKE_f - MAR_o, Any) +assert_type(AR_LIKE_c - MAR_o, Any) +assert_type(AR_LIKE_td64 - MAR_o, Any) +assert_type(AR_LIKE_dt64 - MAR_o, Any) +assert_type(AR_LIKE_o - MAR_o, Any) From f9baafb4e0c529939e94218ec500ccc3ee5d5dd4 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Wed, 28 May 2025 22:18:28 +0200 Subject: [PATCH 050/294] review comments --- numpy/_core/tests/test_indexing.py | 3 ++- numpy/_core/tests/test_mem_overlap.py | 7 +++++-- numpy/_typing/_dtype_like.py | 2 +- numpy/lib/tests/test_histograms.py | 6 ++++-- ruff.toml | 8 ++++++-- 5 files changed, 18 insertions(+), 8 deletions(-) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 1d42cde48682..757b8d72782f 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1352,7 +1352,8 @@ def test_boolean_indexing_fast_path(self): "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together # noqa: E501 + # This used to incorrectly give a ValueError: operands could not be + # broadcast together idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed 
array along axis 0; " diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index b437a7e14298..78b943854679 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -165,8 +165,9 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: + base_delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0] err_msg = " " + "\n ".join([ - f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", # noqa: E501 + f"base_a - base_b = {base_delta!r}", f"shape_a = {a.shape!r}", f"shape_b = {b.shape!r}", f"strides_a = {a.strides!r}", @@ -402,7 +403,9 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) # noqa: E501 + sum_ax = sum(a * x for a, x in zip(A, X)) + sum_au_half = sum(a * u // 2 for a, u in zip(A, U)) + assert_(sum_ax == sum_au_half) assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u // 2 for x, u in zip(X, U))) diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index d341db5dc23a..c406b3098384 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -55,7 +55,7 @@ def dtype(self) -> _DTypeT_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # noqa: E501 +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # Would create a dtype[np.void] diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index be5268d9813a..4ba953f462fc 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -554,7 +554,8 @@ def test_outlier(self): assert_equal(len(a), numbins) def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" # noqa: E501 + # Verify that Scott's rule and Stone's rule converges for normally + # distributed data def nbins_ratio(seed, size): rng = np.random.RandomState(seed) @@ -565,7 +566,8 @@ def nbins_ratio(seed, size): geom_space = np.geomspace(start=10, stop=100, num=4).round().astype(int) ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] - # the average difference between the two methods decreases as the dataset size increases. # noqa: E501 + # the average difference between the two methods decreases as the dataset + # size increases. 
avg = abs(np.mean(ll, axis=0) - 0.5) assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) diff --git a/ruff.toml b/ruff.toml index b4190fe41787..deb52e834df9 100644 --- a/ruff.toml +++ b/ruff.toml @@ -68,6 +68,7 @@ ignore = [ "_tempita.py" = ["B909"] "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] + "benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/test_api.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] @@ -90,15 +91,18 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/_typing/_array_like.py" = ["E501"] "numpy/lib/tests/test_function_base.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] -"numpy*pyi" = ["E501"] "numpy/f2py/*py" = ["E501"] +# for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length +"numpy/_typing/_array_like.py" = ["E501"] +"numpy/_typing/_dtype_like.py" = ["E501"] +"numpy*pyi" = ["E501"] + "__init__.py" = ["F401", "F403", "F405"] "__init__.pyi" = ["F401"] "numpy/_core/defchararray.py" = ["F403", "F405"] From 5ab56f649972b77daf20f7f5aa3e796232c05a00 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Thu, 29 May 2025 08:41:39 +0100 Subject: [PATCH 051/294] Convert pocketfft_umath to multi-phase init (PEP 489) (#29028) --- numpy/fft/_pocketfft_umath.cpp | 64 +++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 525b5e5a23da..ab8af5aa522e 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -388,41 +388,57 @@ add_gufuncs(PyObject 
*dictionary) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - NULL, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__pocketfft_umath(void) +static int +_pocketfft_umath_exec(PyObject *m) { - PyObject *m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Import the array and ufunc objects */ - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } PyObject *d = PyModule_GetDict(m); if (add_gufuncs(d) < 0) { Py_DECREF(d); - Py_DECREF(m); - return NULL; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _pocketfft_umath_slots[] = { + {Py_mod_exec, (void*)_pocketfft_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_pocketfft_umath", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + NULL, /* m_methods */ + _pocketfft_umath_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__pocketfft_umath(void) { + return PyModuleDef_Init(&moduledef); } From e107f24f32254d063249fc84e34aa707d826b054 Mon Sep 17 00:00:00 2001 From: Adam Turner 
<9087854+AA-Turner@users.noreply.github.com> Date: Thu, 29 May 2025 13:04:38 +0100 Subject: [PATCH 052/294] MAINT: Convert multiarray to multi-phase init (PEP 489) (#29022) --- numpy/_core/__init__.py | 4 + .../src/multiarray/_multiarray_tests.c.src | 59 ++++--- numpy/_core/src/multiarray/multiarraymodule.c | 166 +++++++++--------- 3 files changed, 125 insertions(+), 104 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 7b19cefb2f93..b0be8d1cbab6 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -23,6 +23,10 @@ except ImportError as exc: import sys + # Bypass for the module re-initialization opt-out + if exc.msg == "cannot load module more than once per process": + raise + # Basically always, the problem should be that the C module is wrong/missing... if ( isinstance(exc, ModuleNotFoundError) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index fc73a64b19a0..8012a32b070e 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2413,41 +2413,56 @@ static PyMethodDef Multiarray_TestsMethods[] = { }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_tests", - NULL, - -1, - Multiarray_TestsMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__multiarray_tests(void) +static int +_multiarray_tests_exec(PyObject *m) { - PyObject *m; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return m; + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); if (init_argparse_mutex() < 0) { - return NULL; + return -1; } if (PyErr_Occurred()) { 
PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_tests_slots[] = { + {Py_mod_exec, _multiarray_tests_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_tests", + .m_size = 0, + .m_methods = Multiarray_TestsMethods, + .m_slots = _multiarray_tests_slots, +}; - return m; +PyMODINIT_FUNC PyInit__multiarray_tests(void) +{ + return PyModuleDef_Init(&moduledef); } NPY_NO_EXPORT int diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8ba38b555edb..022a54fe17da 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4773,36 +4773,27 @@ initialize_thread_unsafe_state(void) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - array_module_methods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__multiarray_umath(void) { - PyObject *m, *d, *s; - PyObject *c_api; +static int +_multiarray_umath_exec(PyObject *m) { + PyObject *d, *s, *c_api; - /* Create the module and add the functions */ - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + 
"cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Initialize CPU features */ if (npy_cpu_init() < 0) { - goto err; + return -1; } /* Initialize CPU dispatch tracer */ if (npy_cpu_dispatch_tracer_init(m) < 0) { - goto err; + return -1; } #if defined(MS_WIN64) && defined(__GNUC__) @@ -4818,62 +4809,62 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { numpy_pydatetime_import(); if (PyErr_Occurred()) { - goto err; + return -1; } /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); if (!d) { - goto err; + return -1; } if (intern_strings() < 0) { - goto err; + return -1; } if (initialize_static_globals() < 0) { - goto err; + return -1; } if (initialize_thread_unsafe_state() < 0) { - goto err; + return -1; } if (init_import_mutex() < 0) { - goto err; + return -1; } if (init_extobj() < 0) { - goto err; + return -1; } if (PyType_Ready(&PyUFunc_Type) < 0) { - goto err; + return -1; } PyArrayDTypeMeta_Type.tp_base = &PyType_Type; if (PyType_Ready(&PyArrayDTypeMeta_Type) < 0) { - goto err; + return -1; } PyArrayDescr_Type.tp_hash = PyArray_DescrHash; Py_SET_TYPE(&PyArrayDescr_Type, &PyArrayDTypeMeta_Type); if (PyType_Ready(&PyArrayDescr_Type) < 0) { - goto err; + return -1; } initialize_casting_tables(); initialize_numeric_types(); if (initscalarmath(m) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArray_Type) < 0) { - goto err; + return -1; } if (setup_scalartypes(d) < 0) { - goto err; + return -1; } PyArrayIter_Type.tp_iter = PyObject_SelfIter; @@ -4881,28 +4872,28 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_free = PyArray_free; if (PyType_Ready(&PyArrayIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMapIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { - goto err; + return -1; } PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew; if 
(PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&NpyIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFlags_Type) < 0) { - goto err; + return -1; } NpyBusDayCalendar_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) { - goto err; + return -1; } /* @@ -4923,43 +4914,43 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { s = npy_cpu_features_dict(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_features__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_baseline_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_baseline__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_dispatch_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_dispatch__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = PyCapsule_New((void *)_datetime_strings, NULL, NULL); if (s == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "DATETIMEUNITS", s); Py_DECREF(s); #define ADDCONST(NAME) \ - s = PyLong_FromLong(NPY_##NAME); \ + s = PyLong_FromLong(NPY_##NAME); \ PyDict_SetItemString(d, #NAME, s); \ Py_DECREF(s) @@ -4999,39 +4990,39 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* Finalize scalar types and expose them via namespace or typeinfo dict */ if (set_typeinfo(d) != 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFunctionDispatcher_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_ArrayFunctionDispatcher", (PyObject *)&PyArrayFunctionDispatcher_Type); if (PyType_Ready(&PyArrayArrayConverter_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_array_converter", (PyObject *)&PyArrayArrayConverter_Type); if (PyType_Ready(&PyArrayMethod_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyBoundArrayMethod_Type) < 0) { - goto err; + return -1; } if 
(initialize_and_map_pytypes_to_dtypes() < 0) { - goto err; + return -1; } if (PyArray_InitializeCasts() < 0) { - goto err; + return -1; } if (init_string_dtype() < 0) { - goto err; + return -1; } /* @@ -5040,7 +5031,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyDataMem_DefaultHandler = PyCapsule_New( &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); if (PyDataMem_DefaultHandler == NULL) { - goto err; + return -1; } /* @@ -5049,32 +5040,32 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { */ current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); if (current_handler == NULL) { - goto err; + return -1; } if (initumath(m) != 0) { - goto err; + return -1; } if (set_matmul_flags(d) < 0) { - goto err; + return -1; } // initialize static references to ndarray.__array_*__ special methods npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); if (npy_static_pydata.ndarray_array_finalize == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); if (npy_static_pydata.ndarray_array_ufunc == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); if (npy_static_pydata.ndarray_array_function == NULL) { - goto err; + return -1; } /* @@ -5090,13 +5081,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { if (npy_cache_import_runtime( "numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { - goto err; + return -1; } if (PyObject_CallFunction( npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); @@ -5104,13 +5095,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( 0, NULL, 
NPY_DEFAULT_INT, NPY_FALSE); if (npy_static_pydata.zero_pyint_like_arr == NULL) { - goto err; + return -1; } ((PyArrayObject_fields *)npy_static_pydata.zero_pyint_like_arr)->flags |= (NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); if (verify_static_structs_initialized() < 0) { - goto err; + return -1; } /* @@ -5120,33 +5111,44 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* The dtype API is not auto-filled/generated via Python scripts: */ _fill_dtype_api(PyArray_API); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_UFUNC_API", c_api); Py_DECREF(c_api); if (PyErr_Occurred()) { - goto err; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_umath_slots[] = { + {Py_mod_exec, _multiarray_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; - return m; +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_umath", + .m_size = 0, + .m_methods = array_module_methods, + .m_slots = _multiarray_umath_slots, +}; - err: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load multiarray module."); - } - Py_DECREF(m); - return NULL; +PyMODINIT_FUNC PyInit__multiarray_umath(void) { + return PyModuleDef_Init(&moduledef); } From b37f7616879b8ce977deca81069c886a4096511d Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 29 May 2025 17:33:36 +0300 Subject: [PATCH 053/294] BLD: 
bump OpenBLAS version, use OpenBLAS for win-arm64 (#29039) * BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel build] * Update requirements/ci_requirements.txt Co-authored-by: Sebastian Berg * use pip to install anaconda-client on win-arm64 [wheel build] * allow noblas in win32 wheels, use scipy-openblas32 on win-arm64 [wheel build] * improve runner arch detection logic [wheel build] * remove win_arm64 cibuildwheel override * remove 'strip' before calling delvewheel [wheel build] * use openblas 0.3.29.265 only on win-arm64 [wheel build] * add comment about lack of win-arm64 openblas64 wheels [wheel build] --------- Co-authored-by: Sebastian Berg Co-authored-by: Joe Rickerby --- .github/workflows/wheels.yml | 13 ++++++++++++- pyproject.toml | 9 ++------- requirements/ci32_requirements.txt | 3 ++- requirements/ci_requirements.txt | 6 ++++-- tools/wheels/cibw_before_build.sh | 26 ++++++++++++++++++-------- tools/wheels/repair_windows.sh | 23 ----------------------- 6 files changed, 38 insertions(+), 42 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index fa2c1cb5ae97..097efe8e7225 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -186,7 +186,8 @@ jobs: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + - name: install micromamba + uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to @@ -200,6 +201,16 @@ jobs: create-args: >- anaconda-client + - name: win-arm64 install anaconda client + if: ${{ matrix.buildplat[1] == 'win_arm64' }} + run: | + # Rust installation needed for rpds-py. 
+ Invoke-WebRequest https://static.rust-lang.org/rustup/dist/aarch64-pc-windows-msvc/rustup-init.exe -UseBasicParsing -Outfile rustup-init.exe + .\rustup-init.exe -y + $env:PATH="$env:PATH;$env:USERPROFILE\.cargo\bin" + pip install anaconda-client + + - name: Upload wheels if: success() && github.repository == 'numpy/numpy' shell: bash -el {0} diff --git a/pyproject.toml b/pyproject.toml index 5cf75b20a6b6..1e08544ced75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -178,21 +178,16 @@ repair-wheel-command = [ ] [tool.cibuildwheel.windows] -# This does not work, use CIBW_ENVIRONMENT_WINDOWS -environment = {PKG_CONFIG_PATH="./.openblas"} config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" +# This does not work, use CIBW_ENVIRONMENT_WINDOWS +environment = {PKG_CONFIG_PATH="./.openblas"} [[tool.cibuildwheel.overrides]] select = "*-win32" config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" repair-wheel-command = "" -[[tool.cibuildwheel.overrides]] -select = "*-win_arm64" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" -repair-wheel-command = "" - [[tool.cibuildwheel.overrides]] select = "*pyodide*" before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 5a7be719214a..74c9a51ec111 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,4 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.29.0.0 +scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index adf7d86558f0..b6ea06c812c8 
100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,6 @@ spin==0.13 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.29.0.0 -scipy-openblas64==0.3.29.0.0 +scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' +# Note there is not yet a win-arm64 wheel, so we currently only exclude win-arm64 +scipy-openblas64==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 3e1d4498fe7c..e41e5d37316b 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -22,9 +22,6 @@ fi if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then echo "No BLAS used for 32-bit wheels" export INSTALL_OPENBLAS=false -elif [[ $(python -c"import sysconfig; print(sysconfig.get_platform())") == "win-arm64" ]]; then - echo "No BLAS used for ARM64 wheels" - export INSTALL_OPENBLAS=false elif [ -z $INSTALL_OPENBLAS ]; then # the macos_arm64 build might not set this variable export INSTALL_OPENBLAS=true @@ -32,20 +29,33 @@ fi # Install Openblas from scipy-openblas64 if [[ "$INSTALL_OPENBLAS" = "true" ]] ; then - echo PKG_CONFIG_PATH $PKG_CONFIG_PATH + # by default, use scipy-openblas64 + OPENBLAS=openblas64 + # Possible values for RUNNER_ARCH in github are + # X86, X64, ARM, or ARM64 + # TODO: should we detect a missing RUNNER_ARCH and use platform.machine() + # when wheel build is run outside github? 
+ # On 32-bit platforms, use scipy_openblas32 + # On win-arm64 use scipy_openblas32 + if [[ $RUNNER_ARCH == "X86" || $RUNNER_ARCH == "ARM" ]] ; then + OPENBLAS=openblas32 + elif [[ $RUNNER_ARCH == "ARM64" && $RUNNER_OS == "Windows" ]] ; then + OPENBLAS=openblas32 + fi + echo PKG_CONFIG_PATH is $PKG_CONFIG_PATH, OPENBLAS is ${OPENBLAS} PKG_CONFIG_PATH=$PROJECT_DIR/.openblas rm -rf $PKG_CONFIG_PATH mkdir -p $PKG_CONFIG_PATH python -m pip install -r requirements/ci_requirements.txt - python -c "import scipy_openblas64; print(scipy_openblas64.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc + python -c "import scipy_${OPENBLAS}; print(scipy_${OPENBLAS}.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc # Copy the shared objects to a path under $PKG_CONFIG_PATH, the build # will point $LD_LIBRARY_PATH there and then auditwheel/delocate-wheel will # pull these into the wheel. Use python to avoid windows/posix problems python < Date: Thu, 29 May 2025 01:10:35 -0400 Subject: [PATCH 054/294] CI: bump to cibuildwheel 3.0.0b4 [wheel build] This bumps to cibuildwheel 3.0.0b4, which contains CPython 3.14.0b2, and removes the directory changing workaround. 
Signed-off-by: Henry Schreiner --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- tools/wheels/cibw_test_command.sh | 4 ---- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index fea77068e128..86628f6882cd 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # 2.23.3 + - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 097efe8e7225..3736f28cbd8c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -177,7 +177,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 + uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 2d39687a861b..60e90ef5beb6 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,10 +4,6 @@ set -xe PROJECT_DIR="$1" -if [ -d tools ]; then - cd tools -fi - python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From bcf8a99e8ca8c068f9b51a7a086000b53632c3cd Mon Sep 17 00:00:00 2001 From: Henry Schreiner Date: Wed, 28 May 2025 13:02:47 -0400 Subject: [PATCH 055/294] CI: clean up cibuildwheel config a bit [wheel build] This simplifies the configuration a bit: * Combine pyodide blocks * Use tables/lists for config-settings and skip * Remove a few repeated lines * Use a list for select Signed-off-by: Henry Schreiner --- pyproject.toml | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff 
--git a/pyproject.toml b/pyproject.toml index 1e08544ced75..b0e58705ebd1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -142,14 +142,17 @@ tracker = "https://github.com/numpy/numpy/issues" # build wheels for in CI are controlled in `.github/workflows/wheels.yml` and # `tools/ci/cirrus_wheels.yml`. build-frontend = "build" -skip = "*_i686 *_ppc64le *_s390x *_universal2" +skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" -# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) -config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) +[tool.cibuildwheel.config-settings] +setup-args = ["-Duse-ilp64=true", "-Dallow-noblas=false"] +build-dir = "build" + [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" manylinux-aarch64-image = "manylinux_2_28" @@ -157,7 +160,14 @@ musllinux-x86_64-image = "musllinux_1_2" musllinux-aarch64-image = "musllinux_1_2" [tool.cibuildwheel.pyodide] -config-settings = "build-dir=build setup-args=--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross setup-args=-Dblas=none setup-args=-Dlapack=none" +before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" +# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten +repair-wheel-command = "" +test-command = "python -m pytest --pyargs numpy -m 'not slow'" + +[tool.cibuildwheel.pyodide.config-settings] +build-dir = "build" +setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] 
[tool.cibuildwheel.linux.environment] # RUNNER_OS is a GitHub Actions specific env var; define it here so it works on Cirrus CI too @@ -178,22 +188,16 @@ repair-wheel-command = [ ] [tool.cibuildwheel.windows] -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=false"], build-dir="build"} repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" # This does not work, use CIBW_ENVIRONMENT_WINDOWS environment = {PKG_CONFIG_PATH="./.openblas"} [[tool.cibuildwheel.overrides]] -select = "*-win32" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" +select = ["*-win32"] +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=true"], build-dir="build"} repair-wheel-command = "" -[[tool.cibuildwheel.overrides]] -select = "*pyodide*" -before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" -# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten -repair-wheel-command = "" -test-command = "python -m pytest --pyargs numpy -m 'not slow'" [tool.meson-python] meson = 'vendored-meson/meson/meson.py' From 3318fbd3a137a44d8d54ab304a16fd359d80887b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 29 May 2025 09:31:10 -0600 Subject: [PATCH 056/294] MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in f2py --- numpy/f2py/src/fortranobject.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 4e2aa370b643..5c2b4bdf0931 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -363,6 +363,8 @@ fortran_getattr(PyFortranObject *fp, char *name) { int i, j, k, flag; if (fp->dict != NULL) { + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); if (v == NULL && 
PyErr_Occurred()) { return NULL; @@ -371,6 +373,17 @@ fortran_getattr(PyFortranObject *fp, char *name) Py_INCREF(v); return v; } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + } for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); i++) From 336d661e958065f8c95a559a24e143b5fc747c1d Mon Sep 17 00:00:00 2001 From: crusaderky Date: Thu, 29 May 2025 20:10:18 +0100 Subject: [PATCH 057/294] Ignore all build-* directories --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index df7f084e3645..c4de68c1a9a7 100644 --- a/.gitignore +++ b/.gitignore @@ -64,7 +64,7 @@ GTAGS ################ # meson build/installation directories build -build-install +build-* # meson python output .mesonpy-native-file.ini # sphinx build directory From d27f6618923f6b65841d7eaebcd21062ecb5a8aa Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 18:18:11 +0200 Subject: [PATCH 058/294] TYP: fix `NDArray[integer]` inplace operator mypy issue --- numpy/__init__.pyi | 94 +++------------------ numpy/ma/core.pyi | 57 +++---------- numpy/typing/tests/data/fail/arithmetic.pyi | 2 - 3 files changed, 23 insertions(+), 130 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0e8b4625e7d4..df72ce3d877a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3367,13 +3367,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3393,13 +3387,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__isub__` @overload - def __isub__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3414,15 +3402,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: ndarray[Any, dtype[signedinteger | character] | dtypes.StringDType], - other: _ArrayLikeInt_co, - /, + self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, / ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3433,13 +3413,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__ipow__` @overload - def __ipow__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __ipow__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3457,13 +3431,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` @overload - def __ifloordiv__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3471,13 +3439,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__ifloordiv__` @overload - def __imod__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3491,25 +3453,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__irshift__` @overload - def __ilshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __ilshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... # keep in sync with `__ilshift__` @overload - def __irshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __irshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3517,13 +3467,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iand__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iand__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3531,13 +3475,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ixor__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __ixor__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3545,13 +3483,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ior__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3559,9 +3491,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __imatmul__(self: NDArray[unsignedinteger], other: _ArrayLikeUInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 388619e1a654..da4ad3b333db 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -668,20 +668,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __pow__(self, other, mod: None = None, /): ... 
def __rpow__(self, other, mod: None = None, /): ... - # Keep in sync with `ndarray.__iadd__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__iadd__` @overload def __iadd__( self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __iadd__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / @@ -707,16 +700,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__isub__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. - @overload - def __isub__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `ndarray.__isub__` @overload - def __isub__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __isub__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / @@ -734,20 +720,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__imul__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. 
+ # Keep in sync with `ndarray.__imul__` @overload def __imul__( self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: MaskedArray[Any, dtype[signedinteger] | dtype[character] | dtypes.StringDType], - other: _ArrayLikeInt_co, / + self: MaskedArray[Any, dtype[integer] | dtype[character] | dtypes.StringDType], other: _ArrayLikeInt_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( @@ -762,16 +742,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__ifloordiv__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. - @overload - def __ifloordiv__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `ndarray.__ifloordiv__` @overload - def __ifloordiv__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __ifloordiv__( self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / @@ -781,8 +754,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__itruediv__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. 
+ # Keep in sync with `ndarray.__itruediv__` @overload def __itruediv__( self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / @@ -798,16 +770,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__ipow__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__ipow__` @overload - def __ipow__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __ipow__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index e94861a3eba7..e696083b8614 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -85,7 +85,6 @@ AR_b *= AR_LIKE_f # type: ignore[arg-type] AR_b *= AR_LIKE_c # type: ignore[arg-type] AR_b *= AR_LIKE_m # type: ignore[arg-type] -AR_u *= AR_LIKE_i # type: ignore[arg-type] AR_u *= AR_LIKE_f # type: ignore[arg-type] AR_u *= AR_LIKE_c # type: ignore[arg-type] AR_u *= AR_LIKE_m # type: ignore[arg-type] @@ -105,7 +104,6 @@ AR_b **= AR_LIKE_i # type: ignore[misc] AR_b **= AR_LIKE_f # type: ignore[misc] AR_b **= AR_LIKE_c # type: ignore[misc] -AR_u **= AR_LIKE_i # type: ignore[arg-type] AR_u **= AR_LIKE_f # type: ignore[arg-type] AR_u **= AR_LIKE_c # type: ignore[arg-type] From 998f561a1af90083099e4410412a842c49b4f993 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 18:32:14 +0200 Subject: [PATCH 059/294] TYP: regression tests for `NDArray[integer]` inplace 
ops --- numpy/typing/tests/data/pass/arithmetic.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index b50d28e5fca5..3b2901cf2b51 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any +from typing import Any, cast import numpy as np import numpy.typing as npt import pytest @@ -61,6 +61,7 @@ def __rpow__(self, value: Any) -> Object: AR_b: npt.NDArray[np.bool] = np.array([True]) AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) AR_f: npt.NDArray[np.float64] = np.array([1.0]) AR_c: npt.NDArray[np.complex128] = np.array([1j]) AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) @@ -282,6 +283,10 @@ def __rpow__(self, value: Any) -> Object: AR_i *= AR_LIKE_u AR_i *= AR_LIKE_i +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + AR_f *= AR_LIKE_b AR_f *= AR_LIKE_u AR_f *= AR_LIKE_i @@ -314,6 +319,10 @@ def __rpow__(self, value: Any) -> Object: AR_i **= AR_LIKE_u AR_i **= AR_LIKE_i +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + AR_f **= AR_LIKE_b AR_f **= AR_LIKE_u AR_f **= AR_LIKE_i From 8c35aaad673e64c8806ec13141124667eb450469 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 May 2025 17:14:40 +0000 Subject: [PATCH 060/294] MAINT: Bump ossf/scorecard-action from 2.4.1 to 2.4.2 Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.4.1 to 2.4.2. 
- [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/f49aabe0b5af0936a0987cfb85d86b75731b0186...05b42c624433fc40578a4040d5cf5e36ddca8cde) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-version: 2.4.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 11a5be5f488a..f4e06677f804 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif From 32cbf09a31fc859f98b8ed7178f5ea8d95310b39 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 22:13:07 +0200 Subject: [PATCH 061/294] MAINT: bump `mypy` to `1.16.0` --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 91585a8dcb13..d2964bf78368 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.15.0 + - mypy=1.16.0 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index a2a68f044a50..4fb1d47bf50d 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -13,7 +13,7 @@ pytest-timeout # For testing types. 
Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.15.0; platform_python_implementation != "PyPy" +mypy==1.16.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 87e769a196336ca1427eb2215925bf987154b316 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:22:31 +0200 Subject: [PATCH 062/294] TYP: run mypy in strict mode --- numpy/typing/tests/data/mypy.ini | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index bca203260efa..4aa465ae087b 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,9 +1,8 @@ [mypy] +strict = True enable_error_code = deprecated, ignore-without-code, truthy-bool -strict_bytes = True -warn_unused_ignores = True -implicit_reexport = False disallow_any_unimported = True -disallow_any_generics = True +allow_redefinition_new = True +local_partial_types = True show_absolute_path = True pretty = True From 51f4ac8f6188a4cd7dbb81ee4fff0cf7aef34e3e Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:23:22 +0200 Subject: [PATCH 063/294] TYP: disable mypy's `no-untyped-call` errors in the `MaskedArray` type-tests --- numpy/typing/tests/data/pass/ma.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index e7915a583210..b9be2b2e4384 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -7,6 +7,8 @@ _ScalarT = TypeVar("_ScalarT", bound=np.generic) MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +# mypy: disable-error-code=no-untyped-call + MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) MAR_i: MaskedArray[np.int64] = 
np.ma.MaskedArray([1]) From 57296fbbcba96beaf923e5b7c45bfef547d1484a Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:24:07 +0200 Subject: [PATCH 064/294] TYP: remove problematic runtime code from a `.pyi` test module --- numpy/typing/tests/data/reveal/nbit_base_example.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index 33229660b6f8..66470b95bf15 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -7,8 +7,7 @@ from numpy._typing import _32Bit, _64Bit T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - return a + b +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... 
i8: np.int64 i4: np.int32 From f925c0cefc05e4619bd7554eb6c301e7e5934685 Mon Sep 17 00:00:00 2001 From: Guido Imperiale Date: Sat, 31 May 2025 09:26:07 +0100 Subject: [PATCH 065/294] BUG: f2py: thread-safe forcomb (#29091) --- numpy/f2py/cfuncs.py | 39 ++++++++++++++++++++++----------------- numpy/f2py/rules.py | 5 +++-- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 6c48c1ef0175..b2b1cad3d867 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -598,32 +598,37 @@ def errmess(s: str) -> None: return ii; }""" cfuncs['forcomb'] = """ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { int k; if (dims==NULL) return 0; if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; } - forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; + cache->i[0] = cache->i_tr[nd-1] = -1; return 1; } -static int *nextforcomb(void) { +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; int j,*i,*i_tr,k; - int nd=forcombcache.nd; - if ((i=forcombcache.i) == NULL) return NULL; - if ((i_tr=forcombcache.i_tr) == NULL) return NULL; - if (forcombcache.d == NULL) return NULL; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; 
i[0]++; - if (i[0]==forcombcache.d[0]) { + if (i[0]==cache->d[0]) { j=1; - while ((jd[j]-1)) j++; if (j==nd) { free(i); free(i_tr); @@ -634,7 +639,7 @@ def errmess(s: str) -> None: i_tr[nd-j-1]++; } else i_tr[nd-1]++; - if (forcombcache.tr) return i_tr; + if (cache->tr) return i_tr; return i; }""" needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index c10d2afdd097..667ef287f92b 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1184,9 +1184,10 @@ """\ int *_i,capi_i=0; CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); - if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), PyArray_NDIM(capi_#varname#_as_array),1)) { - while ((_i = nextforcomb())) + while ((_i = nextforcomb(&cache))) #varname#[capi_i++] = #init#; /* fortran way */ } else { PyObject *exc, *val, *tb; From ecb2f40970fb28b997a3e781dd2733500ebf9a6f Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Sat, 31 May 2025 04:32:42 -0400 Subject: [PATCH 066/294] PERF: Use dict instead of list to make NpzFile member existence checks constant time (#29098) Use dict instead of list to convert the passed key to the name used in the archive. --- numpy/lib/_npyio_impl.py | 64 ++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index e588b8454b44..f284eeb74834 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -195,16 +195,13 @@ def __init__(self, fid, own_fid=False, allow_pickle=False, # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. 
_zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) self.allow_pickle = allow_pickle self.max_header_size = max_header_size self.pickle_kwargs = pickle_kwargs - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: @@ -240,37 +237,34 @@ def __len__(self): return len(self.files) def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. - member = False - if key in self._files: - member = True - elif key in self.files: - member = True - key += '.npy' - if member: - bytes = self.zip.open(key) - magic = bytes.read(len(format.MAGIC_PREFIX)) - bytes.close() - if magic == format.MAGIC_PREFIX: - bytes = self.zip.open(key) - return format.read_array(bytes, - allow_pickle=self.allow_pickle, - pickle_kwargs=self.pickle_kwargs, - max_header_size=self.max_header_size) - else: - return self.zip.read(key) + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None else: - raise KeyError(f"{key} is not a file in the archive") + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. 
+ # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read(key) def __contains__(self, key): - return (key in self._files or key in self.files) + return (key in self._files) def __repr__(self): # Get filename or default to `object` From 358a13c861d728e11f1efc18e6c04b915c682ccf Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Sat, 31 May 2025 01:48:27 -0700 Subject: [PATCH 067/294] BENCH: Increase array sizes for ufunc and sort benchmarks (#29084) --- benchmarks/benchmarks/bench_function_base.py | 2 +- benchmarks/benchmarks/bench_ufunc_strides.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 9b770aeb60bf..f72d50eb74ce 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -236,7 +236,7 @@ class Sort(Benchmark): param_names = ['kind', 'dtype', 'array_type'] # The size of the benchmarked arrays. 
- ARRAY_SIZE = 10000 + ARRAY_SIZE = 1000000 def setup(self, kind, dtype, array_type): rnd = np.random.RandomState(507582308) diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index 95df16e2cb5e..0c80b1877b3a 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -10,7 +10,7 @@ class _AbstractBinary(Benchmark): params = [] param_names = ['ufunc', 'stride_in0', 'stride_in1', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False @@ -63,7 +63,7 @@ class _AbstractUnary(Benchmark): params = [] param_names = ['ufunc', 'stride_in', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False From 79ee0b208345696f77697c0fd6671e2763dfe51b Mon Sep 17 00:00:00 2001 From: Yuki Kobayashi Date: Sun, 1 Jun 2025 01:08:58 +0000 Subject: [PATCH 068/294] Fix some incorrect reST markups --- doc/changelog/1.21.5-changelog.rst | 2 +- doc/neps/nep-0021-advanced-indexing.rst | 6 +++--- doc/neps/nep-0030-duck-array-protocol.rst | 4 ++-- doc/source/dev/internals.code-explanations.rst | 2 +- doc/source/reference/c-api/types-and-structures.rst | 2 +- doc/source/reference/maskedarray.generic.rst | 4 ++-- doc/source/reference/simd/build-options.rst | 2 +- doc/source/release/1.11.0-notes.rst | 2 +- doc/source/release/1.13.0-notes.rst | 2 +- doc/source/release/1.14.0-notes.rst | 4 ++-- doc/source/release/1.15.0-notes.rst | 6 +++--- doc/source/release/1.18.0-notes.rst | 4 ++-- doc/source/release/1.21.5-notes.rst | 2 +- doc/source/user/c-info.ufunc-tutorial.rst | 12 ++++++------ doc/source/user/how-to-io.rst | 2 +- numpy/lib/_polynomial_impl.py | 4 ++-- numpy/lib/_version.py | 3 +-- 17 files changed, 31 insertions(+), 32 deletions(-) diff --git a/doc/changelog/1.21.5-changelog.rst b/doc/changelog/1.21.5-changelog.rst index 
acd3599d48ef..04ff638d42a3 100644 --- a/doc/changelog/1.21.5-changelog.rst +++ b/doc/changelog/1.21.5-changelog.rst @@ -22,7 +22,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... -* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst index badd41875af2..11ef238d6179 100644 --- a/doc/neps/nep-0021-advanced-indexing.rst +++ b/doc/neps/nep-0021-advanced-indexing.rst @@ -123,7 +123,7 @@ with shape ``(1,)``, not a 2D sub-matrix with shape ``(1, 1)``. Mixed indexing seems so tricky that it is tempting to say that it never should be used. However, it is not easy to avoid, because NumPy implicitly adds full slices if there are fewer indices than the full dimensionality of the indexed -array. This means that indexing a 2D array like `x[[0, 1]]`` is equivalent to +array. This means that indexing a 2D array like ``x[[0, 1]]`` is equivalent to ``x[[0, 1], :]``. These cases are not surprising, but they constrain the behavior of mixed indexing. @@ -236,7 +236,7 @@ be deduced: For the beginning, this probably means cases where ``arr[ind]`` and ``arr.oindex[ind]`` return different results give deprecation warnings. This includes every use of vectorized indexing with multiple integer arrays. 
- Due to the transposing behaviour, this means that``arr[0, :, index_arr]`` + Due to the transposing behaviour, this means that ``arr[0, :, index_arr]`` will be deprecated, but ``arr[:, 0, index_arr]`` will not for the time being. 7. To ensure that existing subclasses of `ndarray` that override indexing @@ -285,7 +285,7 @@ Open Questions Copying always "fixes" this possible inconsistency. * The final state to morph plain indexing in is not fixed in this PEP. - It is for example possible that `arr[index]`` will be equivalent to + It is for example possible that ``arr[index]`` will be equivalent to ``arr.oindex`` at some point in the future. Since such a change will take years, it seems unnecessary to make specific decisions at this time. diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 7fb8c9734900..4a3d268697a2 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -102,14 +102,14 @@ a complete implementation would look like the following: The implementation above exemplifies the simplest case, but the overall idea is that libraries will implement a ``__duckarray__`` method that returns the original object, and an ``__array__`` method that either creates and returns an -appropriate NumPy array, or raises a``TypeError`` to prevent unintentional use +appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary object that does not implement ``__array__``, it will create a NumPy array scalar). In case of existing libraries that don't already implement ``__array__`` but would like to use duck array typing, it is advised that they introduce -both ``__array__`` and``__duckarray__`` methods. +both ``__array__`` and ``__duckarray__`` methods. 
Usage ----- diff --git a/doc/source/dev/internals.code-explanations.rst b/doc/source/dev/internals.code-explanations.rst index b1ee9b114aa8..1bb8f60528c1 100644 --- a/doc/source/dev/internals.code-explanations.rst +++ b/doc/source/dev/internals.code-explanations.rst @@ -401,7 +401,7 @@ Iterators for the output arguments are then processed. Finally, the decision is made about how to execute the looping mechanism to ensure that all elements of the input arrays are combined to produce the output arrays of the correct type. The options for loop -execution are one-loop (for :term`contiguous`, aligned, and correct data +execution are one-loop (for :term:`contiguous`, aligned, and correct data type), strided-loop (for non-contiguous but still aligned and correct data type), and a buffered loop (for misaligned or incorrect data type situations). Depending on which execution method is called for, diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 1790c4f4d04d..3f16b5f4dbc4 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1618,7 +1618,7 @@ NumPy C-API and C complex When you use the NumPy C-API, you will have access to complex real declarations ``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C standard types from ``complex.h``. 
Unfortunately, ``complex.h`` contains -`#define I ...`` (where the actual definition depends on the compiler), which +``#define I ...`` (where the actual definition depends on the compiler), which means that any downstream user that does ``#include `` could get ``I`` defined, and using something like declaring ``double I;`` in their code will result in an obscure compiler error like diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 9c44ebcbc589..3fbe25d5b03c 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -66,7 +66,7 @@ attributes and methods are described in more details in the .. try_examples:: -The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: + The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: >>> import numpy as np >>> import numpy.ma as ma @@ -521,7 +521,7 @@ Numerical operations -------------------- Numerical operations can be easily performed without worrying about missing -values, dividing by zero, square roots of negative numbers, etc.:: +values, dividing by zero, square roots of negative numbers, etc.: .. try_examples:: diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 524a1532ca57..2b7039136e75 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -234,7 +234,7 @@ The need to align certain CPU features that are assured to be supported by successive generations of the same architecture, some cases: - On ppc64le ``VSX(ISA 2.06)`` and ``VSX2(ISA 2.07)`` both imply one another since the - first generation that supports little-endian mode is Power-8`(ISA 2.07)` + first generation that supports little-endian mode is ``Power-8(ISA 2.07)`` - On AArch64 ``NEON NEON_FP16 NEON_VFPV4 ASIMD`` implies each other since they are part of the hardware baseline. 
diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index 36cd1d65a266..f6fe84a4b17f 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -205,7 +205,7 @@ New Features - ``np.int16``, ``np.uint16``, - ``np.int32``, ``np.uint32``, - ``np.int64``, ``np.uint64``, - - ``np.int_ ``, ``np.intp`` + - ``np.int_``, ``np.intp`` The specification is by precision rather than by C type. Hence, on some platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if diff --git a/doc/source/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst index 3bfaf1ea5169..400c9553fbd3 100644 --- a/doc/source/release/1.13.0-notes.rst +++ b/doc/source/release/1.13.0-notes.rst @@ -136,7 +136,7 @@ implement ``__*slice__`` on the derived class, as ``__*item__`` will intercept these calls correctly. Any code that did implement these will work exactly as before. Code that -invokes``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will +invokes ``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will now issue a DeprecationWarning - ``.__getitem__(slice(start, end))`` should be used instead. diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst index 68040b470caa..055a933291b9 100644 --- a/doc/source/release/1.14.0-notes.rst +++ b/doc/source/release/1.14.0-notes.rst @@ -409,8 +409,8 @@ This new default changes the float output relative to numpy 1.13. The old behavior can be obtained in 1.13 "legacy" printing mode, see compatibility notes above. -``hermitian`` option added to``np.linalg.matrix_rank`` ------------------------------------------------------- +``hermitian`` option added to ``np.linalg.matrix_rank`` +------------------------------------------------------- The new ``hermitian`` option allows choosing between standard SVD based matrix rank calculation and the more efficient eigenvalue based method for symmetric/hermitian matrices. 
diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst index e84386f0fa5d..7aa85d167d29 100644 --- a/doc/source/release/1.15.0-notes.rst +++ b/doc/source/release/1.15.0-notes.rst @@ -213,7 +213,7 @@ C API changes New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` ----------------------------------------------------------------------------------- Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` -have been added and should be used in place of the ``npy_get_floatstatus``and +have been added and should be used in place of the ``npy_get_floatstatus`` and ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang were rearranging the order of operations when the previous functions were used in the ufunc SIMD functions, resulting in the floatstatus flags being checked @@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically. No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins chosen is related to the data size in this situation. -The edges returned by `histogram`` and ``histogramdd`` now match the data float type ------------------------------------------------------------------------------------- +The edges returned by ``histogram`` and ``histogramdd`` now match the data float type +------------------------------------------------------------------------------------- When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the returned edges are now of the same dtype. 
Previously, ``histogram`` would only return the same type if explicit bins were given, and ``histogram`` would diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index 15e0ad77f5d1..a90dbb7a67d9 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -202,9 +202,9 @@ exception will require adaptation, and code that mistakenly called Moved modules in ``numpy.random`` --------------------------------- As part of the API cleanup, the submodules in ``numpy.random`` -``bit_generator``, ``philox``, ``pcg64``, ``sfc64, ``common``, ``generator``, +``bit_generator``, ``philox``, ``pcg64``, ``sfc64``, ``common``, ``generator``, and ``bounded_integers`` were moved to ``_bit_generator``, ``_philox``, -``_pcg64``, ``_sfc64, ``_common``, ``_generator``, and ``_bounded_integers`` +``_pcg64``, ``_sfc64``, ``_common``, ``_generator``, and ``_bounded_integers`` respectively to indicate that they are not part of the public interface. (`gh-14608 `__) diff --git a/doc/source/release/1.21.5-notes.rst b/doc/source/release/1.21.5-notes.rst index c69d26771268..b3e810b51c06 100644 --- a/doc/source/release/1.21.5-notes.rst +++ b/doc/source/release/1.21.5-notes.rst @@ -33,7 +33,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... 
-* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 6b1aca65ed00..76e8af63462f 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -157,7 +157,7 @@ the module. return m; } -To use the ``setup.py file``, place ``setup.py`` and ``spammodule.c`` +To use the ``setup.py`` file, place ``setup.py`` and ``spammodule.c`` in the same folder. Then ``python setup.py build`` will build the module to import, or ``python setup.py install`` will install the module to your site-packages directory. @@ -240,8 +240,8 @@ and then the ``setup.py`` file used to create the module containing the ufunc. The place in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines is +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. .. code-block:: c @@ -339,7 +339,7 @@ the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py file`` for the above code. As before, the module +This is a ``setup.py`` file for the above code. As before, the module can be build via calling ``python setup.py build`` at the command prompt, or installed to site-packages via ``python setup.py install``. The module can also be placed into a local folder e.g. 
``npufunc_directory`` below @@ -408,8 +408,8 @@ sections we first give the ``.c`` file and then the corresponding ``setup.py`` file. The places in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index a90fbecfdec4..81055d42b9ac 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -206,7 +206,7 @@ Human-readable :func:`numpy.save` and :func:`numpy.savez` create binary files. To **write a human-readable file**, use :func:`numpy.savetxt`. The array can only be 1- or -2-dimensional, and there's no ` savetxtz` for multiple files. +2-dimensional, and there's no ``savetxtz`` for multiple files. Large arrays ------------ diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index a58ca76ec2b0..de4c01ecb95c 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -520,9 +520,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - residuals -- sum of squared residuals of the least squares fit - rank -- the effective rank of the scaled Vandermonde - coefficient matrix + coefficient matrix - singular_values -- singular values of the scaled Vandermonde - coefficient matrix + coefficient matrix - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index f7a353868fd2..d70a61040a40 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -22,8 +22,7 @@ class NumpyVersion: - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. 
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. + '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. - Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other From 5e3aa37b1af5207a7aee113cfc5153ace3c67238 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 2 Jun 2025 08:07:35 +0200 Subject: [PATCH 069/294] MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs See https://github.com/scipy/scipy/issues/23061 for details. [skip ci] --- tools/wheels/LICENSE_linux.txt | 4 ++-- tools/wheels/LICENSE_osx.txt | 4 ++-- tools/wheels/LICENSE_win32.txt | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index 9e2d9053b8a7..db488c6cff47 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy.libs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy.libs/libgfortran*.so Description: dynamically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. 
Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 7ef2e381874e..5cea18441b35 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc* Description: dynamically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index c8277e7710a2..aed96845583b 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy.libs\libscipy_openblas*.dll Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy.libs\libscipy_openblas*.dll Description: statically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. 
Libgfortran is free software; you can redistribute it and/or modify From 3b465b051616c2ffa299fb0313a9ef609862cf38 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 3 Jun 2025 14:04:23 +0300 Subject: [PATCH 070/294] MAINT: cleanup from finalized concatenate deprecation (#29115) --- numpy/_core/src/multiarray/multiarraymodule.c | 27 +++++-------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 022a54fe17da..7724756ba351 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -530,8 +530,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, NPY_NO_EXPORT PyArrayObject * PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, NPY_ORDER order, PyArrayObject *ret, - PyArray_Descr *dtype, NPY_CASTING casting, - npy_bool casting_not_passed) + PyArray_Descr *dtype, NPY_CASTING casting) { int iarrays; npy_intp shape = 0; @@ -647,12 +646,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, * @param ret output array to fill * @param dtype Forced output array dtype (cannot be combined with ret) * @param casting Casting mode used - * @param casting_not_passed Deprecation helper */ NPY_NO_EXPORT PyObject * PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret, PyArray_Descr *dtype, - NPY_CASTING casting, npy_bool casting_not_passed) + NPY_CASTING casting) { int iarrays, narrays; PyArrayObject **arrays; @@ -698,7 +696,7 @@ PyArray_ConcatenateInto(PyObject *op, if (axis == NPY_RAVEL_AXIS) { ret = PyArray_ConcatenateFlattenedArrays( narrays, arrays, NPY_CORDER, ret, dtype, - casting, casting_not_passed); + casting); } else { ret = PyArray_ConcatenateArrays( @@ -743,7 +741,7 @@ PyArray_Concatenate(PyObject *op, int axis) casting = NPY_SAME_KIND_CASTING; } return PyArray_ConcatenateInto( - op, axis, NULL, NULL, casting, 0); + op, 
axis, NULL, NULL, casting); } static int @@ -2489,7 +2487,6 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *out = NULL; PyArray_Descr *dtype = NULL; NPY_CASTING casting = NPY_SAME_KIND_CASTING; - PyObject *casting_obj = NULL; PyObject *res; int axis = 0; @@ -2499,22 +2496,10 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), "|axis", &PyArray_AxisConverter, &axis, "|out", NULL, &out, "$dtype", &PyArray_DescrConverter2, &dtype, - "$casting", NULL, &casting_obj, + "$casting", &PyArray_CastingConverter, &casting, NULL, NULL, NULL) < 0) { return NULL; } - int casting_not_passed = 0; - if (casting_obj == NULL) { - /* - * Casting was not passed in, needed for deprecation only. - * This should be simplified once the deprecation is finished. - */ - casting_not_passed = 1; - } - else if (!PyArray_CastingConverter(casting_obj, &casting)) { - Py_XDECREF(dtype); - return NULL; - } if (out != NULL) { if (out == Py_None) { out = NULL; @@ -2526,7 +2511,7 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), } } res = PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out, dtype, - casting, casting_not_passed); + casting); Py_XDECREF(dtype); return res; } From d52bccb9b1ccc80b5dbd06f1b4bfeaeba901affd Mon Sep 17 00:00:00 2001 From: Danis <96629796+DanisNone@users.noreply.github.com> Date: Tue, 3 Jun 2025 19:11:17 +0500 Subject: [PATCH 071/294] TYP: minor ufunc alias fixes in ``__init__.pyi`` (#29120) --- numpy/__init__.pyi | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index df72ce3d877a..41d7411dfdd8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4784,19 +4784,17 @@ arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] bitwise_or: 
_UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +divide: _UFunc_Nin2_Nout1[L['divide'], L[11], None] divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] @@ -4839,7 +4837,6 @@ matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"] matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] -mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] @@ -4863,7 +4860,6 @@ square: _UFunc_Nin1_Nout1[L['square'], L[18], None] subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] @@ -4878,10 +4874,14 @@ atanh = arctanh atan2 = arctan2 concat = concatenate bitwise_left_shift = left_shift +bitwise_not = invert bitwise_invert = invert bitwise_right_shift = right_shift +conj 
= conjugate +mod = remainder permute_dims = transpose pow = power +true_divide = divide class errstate: def __init__( From 2306bced4d3234e7cc8be8631b2b72773eabf7f3 Mon Sep 17 00:00:00 2001 From: abhishek-fujitsu Date: Wed, 14 May 2025 15:17:51 +0530 Subject: [PATCH 072/294] update windows-2019 to windows-2022 and meson flag[wheel build] Co-authored-by: Charles Harris --- .github/workflows/wheels.yml | 6 +++--- .github/workflows/windows.yml | 4 ++-- .github/workflows/windows_arm64.yml | 2 +- azure-pipelines.yml | 2 +- meson_cpu/x86/meson.build | 2 ++ numpy/_core/tests/test_umath.py | 7 +++++++ 6 files changed, 16 insertions(+), 7 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 3736f28cbd8c..7e034779fd0b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -89,13 +89,13 @@ jobs: # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile - [macos-13, macosx_x86_64, accelerate] - [macos-14, macosx_arm64, accelerate] # always use accelerate - - [windows-2019, win_amd64, ""] - - [windows-2019, win32, ""] + - [windows-2022, win_amd64, ""] + - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] exclude: # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32, ""] + - buildplat: [windows-2022, win32, ""] python: "pp311" # Don't build PyPy arm64 windows - buildplat: [windows-11-arm, win_arm64, ""] diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 6c02563150da..e760e37780a7 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -16,7 +16,7 @@ permissions: jobs: python64bit_openblas: name: x86-64, LP64 OpenBLAS - runs-on: windows-2019 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' strategy: @@ -92,7 +92,7 @@ jobs: fail-fast: false matrix: include: - - os: 
windows-2019 + - os: windows-2022 architecture: x86 - os: windows-11-arm architecture: arm64 diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml index 0a691bff9b21..71fa9dd88d3b 100644 --- a/.github/workflows/windows_arm64.yml +++ b/.github/workflows/windows_arm64.yml @@ -15,7 +15,7 @@ permissions: jobs: windows_arm: - runs-on: windows-2019 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 36362f6cacc7..af6e5cf52ac4 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -75,7 +75,7 @@ stages: - job: Windows timeoutInMinutes: 120 pool: - vmImage: 'windows-2019' + vmImage: 'windows-2022' strategy: maxParallel: 3 matrix: diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 8c7a0fb59a57..1276e922ff2a 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -212,6 +212,8 @@ if compiler_id == 'msvc' endif endforeach FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) + # Add floating-point contract flag to fix transcendental function accuracy on Windows Server 2022 + FMA3.update(args: {'val': '/fp:contract'}) AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) endif diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 4b698ce82bc6..001a7bffbcc8 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1879,8 +1879,15 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # - spacing/float32,float64 on Windows MSVC with VS2022 if ufunc in (np.spacing, np.ceil) and dtype == 'e': return + # Skip spacing tests with NaN on Windows MSVC (all dtypes) + import platform + if (ufunc == np.spacing and 
+ platform.system() == 'Windows' and + any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): + pytest.skip("spacing with NaN generates warnings on Windows/VS2022") array = np.array(data, dtype=dtype) with assert_no_warnings(): ufunc(array) From 9328c192239e8f288b54ecae98210440f11a7b60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 17:13:25 +0000 Subject: [PATCH 073/294] MAINT: Bump github/codeql-action from 3.28.18 to 3.28.19 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.18 to 3.28.19. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/ff0a06e83cb2de871e5a09832bc6a81e7276941f...fca7ace96b7d713c7035871441bd52efbe39e27e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.28.19 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index fb0dd766a1d8..e0318652d2af 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f4e06677f804..9e21251f87c8 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v2.1.27 + uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v2.1.27 with: sarif_file: results.sarif From 69948f351fcdf7a873fdb360eeaa4d84673ea90f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 3 Jun 2025 19:28:34 +0200 Subject: [PATCH 074/294] DOC: remove very outdated info on ATLAS (#29119) * DOC: remove very outdated info on ATLAS ATLAS hasn't been developed for years, there is no reason to ever use it instead of OpenBLAS, BLIS, or MKL. So remove mentions of it. The troubleshooting instructions haven't been relevant in quite a while either. 
Addresses a comment on gh-29108 [skip cirrus] [skip github] [skip azp] * accept review suggestion [skip ci] Co-authored-by: Matti Picus --------- Co-authored-by: Matti Picus --- INSTALL.rst | 12 ++-------- .../user/troubleshooting-importerror.rst | 22 ------------------- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/INSTALL.rst b/INSTALL.rst index 017e4de8c9d4..6e9d2cd242f5 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -135,12 +135,8 @@ For best performance, a development package providing BLAS and CBLAS should be installed. Some of the options available are: - ``libblas-dev``: reference BLAS (not very optimized) -- ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to - the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for - instructions -- ``libopenblas-base``: fast and runtime detected so no tuning required but a - very recent version is needed (>=0.2.15 is recommended). Older versions of - OpenBLAS suffered from correctness issues on some CPUs. +- ``libopenblas-base``: (recommended) OpenBLAS is performant, and used + in the NumPy wheels on PyPI except where Apple's Accelerate is tuned better for Apple hardware The package linked to when numpy is loaded can be chosen after installation via the alternatives mechanism:: @@ -148,10 +144,6 @@ the alternatives mechanism:: update-alternatives --config libblas.so.3 update-alternatives --config liblapack.so.3 -Or by preloading a specific BLAS library with:: - - LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... - Build issues ============ diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index 6be8831d9c2a..da456dd17e36 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -83,28 +83,6 @@ on how to properly configure Eclipse/PyDev to use Anaconda Python with specific conda environments. 
-Raspberry Pi ------------- - -There are sometimes issues reported on Raspberry Pi setups when installing -using ``pip3 install`` (or ``pip`` install). These will typically mention:: - - libf77blas.so.3: cannot open shared object file: No such file or directory - - -The solution will be to either:: - - sudo apt-get install libatlas-base-dev - -to install the missing libraries expected by the self-compiled NumPy -(ATLAS is a possible provider of linear algebra). - -*Alternatively* use the NumPy provided by Raspbian. In which case run:: - - pip3 uninstall numpy # remove previously installed version - apt install python3-numpy - - Debug build on Windows ---------------------- From 8944b2b800f5bfcf48a6b0134a2588f3a5a54d66 Mon Sep 17 00:00:00 2001 From: Marc Redemske Date: Tue, 3 Jun 2025 23:21:09 +0200 Subject: [PATCH 075/294] DOC: fix typo in Numpy's module structure --- doc/source/reference/module_structure.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 01a5bcff7fbc..83e697ac04fe 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -5,7 +5,7 @@ NumPy's module structure ************************ NumPy has a large number of submodules. Most regular usage of NumPy requires -only the main namespace and a smaller set of submodules. The rest either either +only the main namespace and a smaller set of submodules. The rest either has special-purpose or niche namespaces. 
Main namespaces From 6822a7b10f21c895ca0037a9b9060bb944fb7f96 Mon Sep 17 00:00:00 2001 From: Marc Redemske Date: Wed, 4 Jun 2025 07:16:33 +0200 Subject: [PATCH 076/294] update according to review --- doc/source/reference/module_structure.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 83e697ac04fe..98e3dda54e7b 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -5,7 +5,7 @@ NumPy's module structure ************************ NumPy has a large number of submodules. Most regular usage of NumPy requires -only the main namespace and a smaller set of submodules. The rest either has +only the main namespace and a smaller set of submodules. The rest either have special-purpose or niche namespaces. Main namespaces From 6ab549012852157727097c52328bad56cfd85d46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Jun 2025 18:00:32 +0000 Subject: [PATCH 077/294] MAINT: Bump conda-incubator/setup-miniconda from 3.1.1 to 3.2.0 Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 3.1.1 to 3.2.0. - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/505e6394dae86d6a5c7fbb6e3fb8938e3e863830...835234971496cad1653abb28a638a281cf32541f) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-version: 3.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/macos.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 1388a756d216..418dc7d52fc1 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -55,7 +55,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 7e034779fd0b..df0c779241f3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -280,7 +280,7 @@ jobs: name: sdist path: ./dist/* - - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: # for installation of anaconda-client, required for upload to # anaconda.org From a88d0149ae91b301696f507f350499207d11f442 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 5 Jun 2025 11:00:59 +0200 Subject: [PATCH 078/294] MAINT: Bump ``scipy-doctest`` to 1.8 (#29085) * MAINT: check-docs: require scipy-doctest >= 1.8.0 See for the discussion https://discuss.scientific-python.org/t/scipy-doctest-select-only-doctests-or-both-doctests-and-unit-tests/1950 [docs only] * CI: bump scipy-doctest version on CI [skip azp] [skip actions] [skip cirrus] * MAINT: tweak doctests to not fail --- .github/workflows/linux.yml | 2 +- .spin/cmds.py | 3 +++ numpy/_core/_add_newdocs.py | 2 +- numpy/lib/introspect.py | 2 +- requirements/doc_requirements.txt | 2 +- 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux.yml 
b/.github/workflows/linux.yml index 742ca5c34144..668c1191d055 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -271,7 +271,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest==1.6.0 hypothesis==6.104.1 matplotlib scipy pytz pandas + # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pytz pandas # spin check-docs -v # spin check-tutorials -v diff --git a/.spin/cmds.py b/.spin/cmds.py index e5ae29d4a6a2..66885de630e0 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -196,6 +196,8 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): import scipy_doctest # noqa: F401 except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e + if scipy_doctest.__version__ < '1.8.0': + raise ModuleNotFoundError("please update scipy_doctests to >= 1.8.0") if (not pytest_args): pytest_args = ('--pyargs', 'numpy') @@ -203,6 +205,7 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): # turn doctesting on: doctest_args = ( '--doctest-modules', + '--doctest-only-doctests=true', '--doctest-collect=api' ) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 8f5de4b7bd89..597d5c6deaf3 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -5944,7 +5944,7 @@ >>> import numpy as np >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) - {'name': (dtype('|S16'), 0), 'grades': (dtype(('float64',(2,))), 16)} + {'name': (dtype('>> import json - >>> print(json.dumps(dict, indent=2)) + >>> print(json.dumps(dict, indent=2)) # may vary (architecture) { "absolute": { "dd": { diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 330f0f7ac8b9..23a0e6deb60f 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -19,7 +19,7 @@ toml # for doctests, also needs 
pytz which is in test_requirements -scipy-doctest==1.6.0 +scipy-doctest>=1.8.0 # interactive documentation utilities # see https://github.com/jupyterlite/pyodide-kernel#compatibility From 7fbc71c354c21c2264d79e3657829eb2608f4dbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Jun 2025 17:05:30 +0000 Subject: [PATCH 079/294] MAINT: Bump mamba-org/setup-micromamba from 2.0.4 to 2.0.5 Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 2.0.4 to 2.0.5. - [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/0dea6379afdaffa5d528b3d1dabc45da37f443fc...b09ef9b599704322748535812ca03efb2625677b) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-version: 2.0.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- .github/workflows/windows_arm64.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index df0c779241f3..f74be5f4a455 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -187,7 +187,7 @@ jobs: path: ./wheelhouse/*.whl - name: install micromamba - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml index 71fa9dd88d3b..3eaf02eb062c 100644 --- a/.github/workflows/windows_arm64.yml +++ b/.github/workflows/windows_arm64.yml @@ -174,7 +174,7 @@ jobs: path: ./*.whl - name: Setup Mamba - uses: 
mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b with: # for installation of anaconda-client, required for upload to # anaconda.org From e81abf7f0ab8c0eba1541e35d048469731c4c6a2 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 7 Jun 2025 10:01:04 -0600 Subject: [PATCH 080/294] MAINT: Update main after 2.3.0 release. - Forwardport 2.3.0-changelog.rst - Forwardport 2.3.0-notes.rst - Forwardport .mailmap [skip azp] [skip cirrus] [skip actions] --- .mailmap | 39 +- doc/changelog/2.3.0-changelog.rst | 704 +++++++++++++++++++++++++++++ doc/source/release/2.3.0-notes.rst | 517 ++++++++++++++++++++- 3 files changed, 1254 insertions(+), 6 deletions(-) create mode 100644 doc/changelog/2.3.0-changelog.rst diff --git a/.mailmap b/.mailmap index f33dfddb6492..e3e3bb56ecdf 100644 --- a/.mailmap +++ b/.mailmap @@ -11,6 +11,7 @@ !Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> !DWesl <22566757+DWesl@users.noreply.github.com> !Dreamge +!EarlMilktea <66886825+EarlMilktea@users.noreply.github.com> !Endolith !GalaxySnail !Illviljan <14371165+Illviljan@users.noreply.github.com> @@ -21,6 +22,7 @@ !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays !amagicmuffin <2014wcheng@gmail.com> +!amotzop !bersbersbers <12128514+bersbersbers@users.noreply.github.com> !code-review-doctor !cook-1229 <70235336+cook-1229@users.noreply.github.com> @@ -34,6 +36,7 @@ !hutauf !jbCodeHub !juztamau5 +!karl3wm !legoffant <58195095+legoffant@users.noreply.github.com> !liang3zy22 <35164941+liang3zy22@users.noreply.github.com> !luzpaz @@ -51,6 +54,7 @@ !pmvz !pojaghi <36278217+pojaghi@users.noreply.github.com> !pratiklp00 +!samir539 !sfolje0 !spacescientist !stefan6419846 @@ -59,12 +63,15 @@ !tautaus !undermyumbrella1 !vahidmech +!wenlong2 !xoviat <49173759+xoviat@users.noreply.github.com> !xoviat <49173759+xoviat@users.noreply.github.com> !yan-wyb !yetanothercheer Aaron 
Baecker Adrin Jalali +Abhishek Kumar +Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> Abraham Medina Arun Kota Arun Kota Arun Kota @@ -140,6 +147,8 @@ Anton Prosekin Anže Starič Arfy Slowy Arnaud Ma +Arnaud Tremblay +Arnaud Tremblay <59627629+Msa360@users.noreply.github.com> Aron Ahmadia Arun Kota Arun Kota @@ -190,6 +199,8 @@ Carl Kleffner Carl Leake Carlos Henrique Hermanny Moreira da Silva Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> +Carlos Martin +Carlos Martin Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -200,6 +211,8 @@ Chris Burns Chris Fu (傅立业) <17433201@qq.com> Chris Holland <41524756+ChrisAHolland@users.noreply.github.com> Chris Kerr +Chris Navarro +Chris Navarro <24905907+lvllvl@users.noreply.github.com> Chris Vavaliaris Christian Clauss Christopher Dahlin @@ -270,6 +283,7 @@ Eric Fode Eric Fode Eric Quintero Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan +Ernst Peng Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> Eskild Eriksen @@ -300,8 +314,11 @@ Gregory R. Lee Gregory R. Lee Guo Ci guoci Guo Shuai +Gyeongjae Choi Habiba Hye Habiba Hye <145866308+HabibiHye@users.noreply.github.com> +Halle Loveday +Halle Loveday Hameer Abbasi Hannah Aizenman Han Genuit @@ -311,6 +328,10 @@ Helder Oliveira Hemil Desai Himanshu Hiroyuki V. 
Yamazaki +Daniel Hrisca +Daniel Hrisca +François de Coatpont +François de Coatpont <93073405+Chevali2004@users.noreply.github.com> Hugo van Kemenade Iantra Solari I-Shen Leong @@ -363,6 +384,7 @@ Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Jiuding Tan (谭九鼎) <109224573@qq.com> Johann Faouzi Johann Rohwer Johann Rohwer jmrohwer @@ -447,10 +469,13 @@ Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> +Makima C. Yang Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> +Marco Edward Gorelli +Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> Mark DePristo @@ -508,6 +533,7 @@ Michael Schnaitter Michael Seifert Michel Fruchart +Mike O'Brien Mike Toews Miki Watanabe (渡邉 美希) Miles Cranmer @@ -516,9 +542,12 @@ Milica Dančuk love-bees <33499899+love-bees@users.noreply.g Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohammed Abdul Rahman +Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> Mohaned Qunaibit Muhammad Kasim Muhammed Muhsin +Mugundan Selvanayagam Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi @@ -571,6 +600,7 @@ Peter J Cock Peter Kämpf Peyton Murray Phil Elson +Filipe Laíns Pierre GM Pierre GM pierregm Piotr Gaiński @@ -591,6 +621,8 @@ Rehas Sachdeva Richard Howe <45905457+rmhowe425@users.noreply.github.com> Ritta Narita Riya Sharma +Rob Timpe +Rob Timpe Robert Kern Robert LU Robert T. 
McGibbon @@ -660,6 +692,7 @@ Steve Stagg Steven J Kern Stuart Archibald Stuart Archibald +SUMIT SRIMANI <2301109104@ptuniv.edu.in SuryaChand P Sylvain Ferriol Takanori Hirano @@ -696,6 +729,8 @@ Vinith Kishore <85550536+vinith2@users.noreply.github Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> +Wang Yang (杨旺) +Wang Yang (杨旺) <1113177880@qq.com> Wansoo Kim Warrick Ball Warrick Ball @@ -711,11 +746,11 @@ Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau -Yang Wang Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> +Yichi Zhang Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> Younes Sandi Younes Sandi <65843206+Unessam@users.noreply.github.com> @@ -723,6 +758,8 @@ Yu Feng Yuji Kanagawa Yuki K Yury Kirienko +Yuvraj Pradhan +Yuvraj Pradhan Zac Hatfield-Dodds Zach Brugh <111941670+zachbrugh@users.noreply.github.com> Zé Vinícius diff --git a/doc/changelog/2.3.0-changelog.rst b/doc/changelog/2.3.0-changelog.rst new file mode 100644 index 000000000000..7ca672ba8dbf --- /dev/null +++ b/doc/changelog/2.3.0-changelog.rst @@ -0,0 +1,704 @@ + +Contributors +============ + +A total of 134 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !EarlMilktea + +* !amotzop + +* !fengluoqiuwu +* !h-vetinari +* !karl3wm + +* !partev +* !samir539 + +* !wenlong2 + +* Aarni Koskela + +* Abdu Zoghbi + +* Abhishek Kumar +* Adam J. Stewart +* Aditi Juneja +* Adrin Jalali +* Agriya Khetarpal +* Alicia Boya García + +* Andrej Zhilenkov +* Andrew Nelson +* Angus Gibson + +* Antonio Rech Santos + +* Ari Gato + +* Arnaud Tremblay + +* Arvid Bessen + +* Baskar Gopinath + +* Carlos Martin + +* Charles Harris +* Chris Navarro + +* Chris Sidebottom +* Christian Lorentzen +* Christine P. 
Chai + +* Christopher Sidebottom +* Clément Robert +* Colin Gilgenbach + +* Craig Peters + +* Cédric Hannotier +* Daniel Hrisca +* Derek Homeier +* Diego Baldassar + +* Dimitri Papadopoulos Orfanos +* Eoghan O'Connell + +* Eric Larson +* Ernst Peng + +* Evgeni Burovski +* Filipe Laíns +* François Rozet + +* François de Coatpont + +* GUAN MING +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Guido Imperiale + +* Gyeongjae Choi + +* Halle Loveday + +* Hannah Wheeler + +* Hao Chen + +* Harmen Stoppels + +* Hin-Tak Leung + +* Ian DesJardin + +* Ihar Hrachyshka + +* Ilhan Polat +* Inessa Pawson +* J. Steven Dodge + +* Jake VanderPlas +* Jiachen An + +* Jiuding Tan (谭九鼎) +* Joe Rickerby + +* John Kirkham +* John Stilley + +* Jonathan Albrecht + +* Joren Hammudoglu +* Kai Germaschewski + +* Krishna Bindumadhavan + +* Lucas Colley +* Luka Krmpotić + +* Lysandros Nikolaou +* Maanas Arora +* Makima C. Yang + +* Marco Barbosa + +* Marco Edward Gorelli + +* Mark Harfouche +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matthew Brett +* Matthew Goldsberry + +* Matthew Sterrett +* Matthias Diener +* Matthieu Darbois +* Matti Picus +* Melissa Weber Mendonça +* Michael Siebert +* Mike O'Brien + +* Mohammed Abdul Rahman + +* Mugundan Selvanayagam + +* Musharaf Aijaz Baba + +* Musharraffaijaz + +* Nathan Goldbaum +* Nicholas Christensen + +* Nitish Satyavolu + +* Omid Rajaei +* PTUsumit + +* Peter Hawkins +* Peyton Murray +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Rob Timpe + +* Robert Kern +* Rohit Goswami +* Ross Barnowski +* Roy Smart +* Saransh Chopra +* Saraswathy Kalaiselvan + +* Sayed Adel +* Sebastian Berg +* Shantanu Jain + +* Shashwat Pandey + +* Shi Entong + +* Simon Altrogge +* Stan Ulbrych +* Thomas A Caswell +* Théotime Grohens + +* Tyler Reddy +* WANG Xuerui + +* WEN Hao + +* Wang Yang (杨旺) + +* Warren Weckesser +* Warrick Ball +* William Andrea +* Yakov Danishevsky + +* Yichi Zhang + +* Yuvraj Pradhan + +* dependabot[bot] +* 
hfloveday12 + + +Pull requests merged +==================== + +A total of 556 pull requests were merged for this release. + +* `#22718 `__: DOC: Add docs on using GitHub Codespaces for NumPy development +* `#25675 `__: ENH: add matvec and vecmat gufuncs +* `#25934 `__: ENH: Convert tanh from C universal intrinsics to C++ using Highway +* `#25991 `__: ENH: Optimize polyutils as_series +* `#26018 `__: ENH add hash based unique +* `#26745 `__: ENH, DOC: Add support for interactive examples for NumPy with... +* `#26958 `__: BUG: index overlap copy +* `#27288 `__: BUG: Scalar array comparison should return np.bool +* `#27300 `__: CI: pycodestyle → ruff +* `#27309 `__: MNT: Enforce ruff/Pyflakes rules (F) +* `#27324 `__: DOC: Removing module name from by-topic docs +* `#27343 `__: ENH: Add support for flat indexing on flat iterator +* `#27404 `__: DOC: document type promotion with Python types +* `#27522 `__: ENH: Cleanup npy_find_array_wrap +* `#27523 `__: ENH: Improve performance of np.count_nonzero for float arrays +* `#27648 `__: MAINT: Fix the code style to our C-Style-Guide +* `#27738 `__: DEP: testing: disable deprecated use of keywords x/y +* `#27784 `__: BUG: ``sinc``\ : fix underflow for float16 +* `#27789 `__: ENH: Implement np.strings.slice as a gufunc +* `#27819 `__: CI: add windows free-threaded CI +* `#27823 `__: BEG, MAINT: Begin NumPy 2.3.0 development. +* `#27824 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27826 `__: CI: update circleci to python3.11.10, limit parallel builds. +* `#27827 `__: CI: skip ninja installation in linux_qemu workflows +* `#27830 `__: ENH: speedup evaluation of numpy.polynomial.legendre.legval. 
+* `#27839 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27841 `__: BUG: Never negate strides in reductions (for now) +* `#27847 `__: MAINT: Bump pypa/cibuildwheel from 2.21.3 to 2.22.0 +* `#27848 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.1 to 2.0.2 +* `#27850 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27854 `__: MAINT: Use mask_store instead of store for compiler bug workaround +* `#27856 `__: SIMD: add lsx optimization for loongarch, and add Qemu tests +* `#27858 `__: DOC: Fix typo +* `#27860 `__: MAINT: Add helper for static or heap allocated scratch space +* `#27862 `__: MAINT: Drop Python 3.10 support. +* `#27864 `__: ENH: stack custom multiarray import exception with the original... +* `#27868 `__: BUG: fix importing numpy in Python's optimized mode +* `#27869 `__: TYP: Fix ``np.interp`` signature for scalar types +* `#27875 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27877 `__: ENH: Refactor ``__qualname__`` across API +* `#27878 `__: DOC: Fix double import in docs +* `#27879 `__: DEV: Add venv files to .gitignore +* `#27883 `__: MAINT,ENH: Reorganize buffered iteration setup +* `#27884 `__: ENH: Remove unnecessary list collection +* `#27886 `__: MAINT: Move uint aligned check to actual transfer function setup +* `#27887 `__: MAINT: A few other small nditer fixes +* `#27896 `__: PERF: improve multithreaded ufunc scaling +* `#27897 `__: MAINT: Bump github/codeql-action from 3.27.5 to 3.27.6 +* `#27898 `__: MAINT: Remove ``25675.new_feature.rst`` snippet. +* `#27899 `__: TST: add timeouts for github actions tests and wheel builds. +* `#27901 `__: MAINT: simplify power fast path logic +* `#27910 `__: MAINT: Make qualname tests more specific and fix code where needed +* `#27914 `__: DOC: Remove 27896-snippet. +* `#27915 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 +* `#27917 `__: CI: Use hashes in specifying some actions. 
+* `#27920 `__: DOC: Fix invalid URL in the index.rst file. +* `#27921 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#27922 `__: MAINT: Move user pointers out of axisdata and simplify iternext +* `#27923 `__: ENH: Add cython wrappers for NpyString API +* `#27927 `__: DOC: Use internal/intersphinx links for neps. +* `#27930 `__: MAINT: Fix cirrus MacOs wheel builds [wheel build] +* `#27931 `__: CI: audit with zizmor +* `#27933 `__: BUG: fix building numpy on musl s390x +* `#27936 `__: MAINT: Update main after 2.2.0 release. +* `#27940 `__: BUG: Fix potential inconsistent behaviour for high-demnsional... +* `#27943 `__: TEST: cleanups +* `#27947 `__: BUG:fix compile error libatomic link test to meson.build +* `#27955 `__: BUG: fix use-after-free error in npy_hashtable.cpp +* `#27956 `__: BLD: add missing include to fix build with freethreading +* `#27962 `__: MAINT: Bump github/codeql-action from 3.27.6 to 3.27.7 +* `#27963 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.2 to 2.0.3 +* `#27967 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27973 `__: MAINT: Apply assorted ruff/flake8-pie rules (PIE) +* `#27974 `__: MAINT: Apply ruff/flake8-implicit-str-concat rules (ISC) +* `#27975 `__: MAINT: Apply ruff/flake8-comprehensions rules (C4) +* `#27976 `__: MAINT: Apply assorted ruff/flake8-pyi rules (PYI) +* `#27978 `__: MAINT: Apply assorted ruff/flake8-simplify rules (SIM) +* `#27981 `__: DOC: Document abi3 compat +* `#27992 `__: BUG: Fix segfault in stringdtype lexsort +* `#27996 `__: MAINT: Bump github/codeql-action from 3.27.7 to 3.27.9 +* `#27997 `__: MAINT: Remove unnecessary (and not safe in free-threaded) 1-D... +* `#27998 `__: API,MAINT: Make ``NpyIter_GetTransferFlags`` public and avoid... +* `#27999 `__: DOC, MAINT: Fix typos found by codespell +* `#28001 `__: DOC: Fix documentation for np.dtype.kind to include 'T' for StringDType +* `#28003 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... 
+* `#28004 `__: DOC: fix several doctests in dtype method docstrings +* `#28005 `__: BUG: Cython API was missing NPY_UINTP. +* `#28008 `__: BUG: Fix handling of matrix class in np.unique. +* `#28009 `__: TST: lib: Test average with object dtype and no weights. +* `#28013 `__: DOC: Fixed typos in development_advanced_debugging.rst +* `#28015 `__: MAINT: run ruff from the repository root +* `#28020 `__: CI: pin scipy-doctest to 1.5.1 +* `#28022 `__: MAINT: Add all submodules to ruff exclusion list. +* `#28023 `__: DOC: update to scipy-doctest 1.6.0 and fix tests +* `#28029 `__: MAINT: Bump actions/upload-artifact from 4.4.3 to 4.5.0 +* `#28032 `__: BUG,MAINT: Fix size bug in new alloc helper and use it in one... +* `#28033 `__: MAINT: Use userpointers to avoid NPY_MAXARGS in iternext() +* `#28035 `__: MAINT: Move ``lib.format`` and ``ctypeslib`` to submodules/private... +* `#28036 `__: Replace Twitter with X +* `#28039 `__: TYP: allow ``None`` in operand sequence of nditer +* `#28043 `__: BUG: Ensure einsum uses chunking (now that nditer doesn't) +* `#28051 `__: MAINT: Update main after 2.2.1 release. 
+* `#28053 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28056 `__: BUG: Fix f2py directives and ``--lower`` casing +* `#28058 `__: MAINT: Update ``spin lint`` command +* `#28060 `__: CI: fix check that GIL remains disabled for free-threaded wheels +* `#28065 `__: TYP: fix unnecessarily broad ``integer`` binop return types +* `#28069 `__: MAINT: update NEP 29 +* `#28073 `__: TYP: use mypy_primer to surface type checking regressions +* `#28074 `__: DOC: clarify np.gradient varargs requirement for axis parameter +* `#28075 `__: MAINT: Replace usage of fixed strides with inner strides in einsum +* `#28080 `__: ENH: Allow an arbitrary number of operands in nditer +* `#28081 `__: DOC: Add release snippets for iteration changes +* `#28083 `__: MAINT: Update LICENSE Copyright to 2025 +* `#28088 `__: BUG: update check for highway compiler support +* `#28089 `__: MAINT: bump ``mypy`` to ``1.14.1`` +* `#28090 `__: DOC:Fixed docstring with example use of np.select +* `#28091 `__: MAINT: Refactor stringdtype casts.c to use cpp templates +* `#28092 `__: MAINT: LoongArch: switch away from the __loongarch64 preprocessor... +* `#28094 `__: DOC: Fix documentation example for numpy.ma.masked +* `#28100 `__: DOC: Move linalg.outer from Decompositions to Matrix and vector... +* `#28101 `__: DOC: Fix sphinx markup in source/reference/random/extending.rst +* `#28102 `__: MAINT: update oldest supported GCC version from 8.4 to 9.3 +* `#28103 `__: MAINT: random: Call np.import_array() in _examples/cython/extending_distribution... +* `#28105 `__: ENH: support no-copy pickling for any array that can be transposed... +* `#28108 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... +* `#28109 `__: TYP: Fix the incorrect ``bool`` return type of ``issubdtype`` +* `#28110 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28116 `__: MAINT: random: Explicitly cast RAND_INT_MAX to double to avoid... 
+* `#28118 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28121 `__: MAINT: Correct NumPy 2.3 C-API versioning and version information +* `#28123 `__: BUG: move reduction initialization to ufunc initialization +* `#28127 `__: DOC: Improve slice docstrings +* `#28128 `__: BUG: Don't use C99 construct in import_array +* `#28129 `__: DEP: Deprecate ``numpy.typing.mypy_plugin`` +* `#28130 `__: CI: Fix mypy_primer comment workflow +* `#28133 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28134 `__: DEP: Deprecate ``numpy.typing.mypy_plugin``\ : The sequel +* `#28141 `__: DOC: Add instructions to build NumPy on WoA +* `#28142 `__: ENH: inline UTF-8 byte counter and make it branchless +* `#28144 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28148 `__: MAINT: Replace usage of outdated fixed strides with inner strides... +* `#28149 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28154 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28161 `__: DOC: Clarify ``np.loadtxt`` encoding argument default value in... +* `#28163 `__: MAINT: Avoid a redundant copy on ``a[...] = b`` +* `#28167 `__: DOC: fix formatting typo in basics.copies.rst +* `#28168 `__: TYP: Fix overlapping overloads issue in "2 in, 1 out" ufuncs +* `#28169 `__: TYP: preserve shape-type in ``ndarray.astype()`` +* `#28170 `__: TYP: Fix missing and spurious top-level exports +* `#28172 `__: BUG: Include Python-including headers first +* `#28179 `__: DOC: Remove duplicate wishlist tab in NEPs. +* `#28180 `__: DOC: Update links in HOWTO_RELEASE.rst +* `#28181 `__: CI: replace quansight-labs/setup-python with astral-sh/setup-uv +* `#28183 `__: MAINT: testing: specify python executable to use in extbuild +* `#28186 `__: MAINT: Update main after 2.2.2 release. +* `#28189 `__: MAINT, DOC: Add sphinx extension to allow svg images in PDF docs... +* `#28202 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... 
+* `#28203 `__: BUG: fix data race in ``np.repeat`` +* `#28206 `__: BUG: Remove unnecessary copying and casting from out array in... +* `#28210 `__: corrected the numpy logo visibility issues on darkmode with the... +* `#28211 `__: MAINT: Hide decorator from pytest traceback +* `#28214 `__: ENH: add pkg_config entrypoint +* `#28219 `__: DOC: Add versionadded directive for axis argument in trim_zeros... +* `#28221 `__: BUG: allclose does not warn for invalid value encountered in... +* `#28222 `__: MAINT: Update highway to latest +* `#28223 `__: MAINT: Add [[maybe_unused] to silence some warnings +* `#28226 `__: DOC: Clarify ``__array__`` protocol arguments +* `#28228 `__: BUG: handle case when StringDType na_object is nan in float to... +* `#28229 `__: DOC: Fix a typo in doc/source/dev/development_workflow.rst +* `#28230 `__: DOC: FIx a link in Roadmap +* `#28231 `__: DOC: Fix external links in the navbar of neps webpage +* `#28232 `__: BUG: Fix float128 FPE handling on ARM64 with Clang compiler +* `#28234 `__: BUG: Add cpp atomic support +* `#28235 `__: MAINT: Compile fix for clang-cl on WoA +* `#28241 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28242 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28246 `__: BLD: better fix for clang / ARM compiles +* `#28250 `__: dtype.__repr__: prefer __name__ for user-defined types. +* `#28252 `__: test_casting_unittests.py: remove tuple +* `#28254 `__: MAINT: expire deprecations +* `#28258 `__: DOC: Change the scientific page link in NumPy/MATLAB +* `#28259 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28262 `__: TYP: expire deprecations +* `#28263 `__: ENH: Add ARM64 (aarch64) CI testing +* `#28264 `__: DOC: Remove an invalid link in f2py-examples.rst +* `#28270 `__: TYP: Fixed missing typing information of set_printoptions +* `#28273 `__: CI: update sanitizer CI to use python compiled with ASAN and... 
+* `#28276 `__: BUG: fix incorrect bytes to StringDType coercion +* `#28279 `__: TYP: Fix scalar constructors +* `#28280 `__: TYP: stub ``numpy.matlib`` +* `#28281 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28288 `__: DOC: Correct a typo in Intel License URL +* `#28290 `__: BUG: fix race initializing legacy dtype casts +* `#28291 `__: BUG: Prevent class-bound attr mutation in ``lib._iotools.NameValidator`` +* `#28294 `__: MAINT: Enable building tanh on vector length agnostic architectures +* `#28295 `__: TYP: stub ``numpy._globals`` +* `#28296 `__: TYP: stub ``numpy._expired_attrs_2_0`` +* `#28297 `__: TYP: stub ``numpy._configtool`` and ``numpy._distributor_init`` +* `#28298 `__: TYP: stub ``numpy.lib._iotools`` +* `#28299 `__: TYP: stub ``lib.user_array`` and ``lib._user_array_impl`` +* `#28300 `__: TYP: stub ``lib.introspect`` +* `#28301 `__: TYP: stub ``lib.recfunctions`` +* `#28302 `__: TYP: fix and improve ``numpy._core.arrayprint`` +* `#28303 `__: TYP: stub ``lib._datasource`` and fix ``lib._npyio_impl`` +* `#28304 `__: DOC: Remove reference to python2 +* `#28307 `__: MAINT: bump ``mypy`` to ``1.15.0`` +* `#28312 `__: DOC: remove references to Python 2 +* `#28319 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28320 `__: MAINT: Update actions/cache and use hash. +* `#28323 `__: DOC: Correct a typo in Exception TooHardError +* `#28327 `__: TYP: fix positional- and keyword-only params in ``astype``\ ,... +* `#28328 `__: CI: Update FreeBSD base image in ``cirrus_arm.yml`` +* `#28330 `__: ENH: Ensure ``lib._format_impl.read_array`` handles file reading... +* `#28332 `__: BUG: avoid segfault in np._core.multiarray.scalar +* `#28335 `__: MAINT: Update main after 2.2.3 release. +* `#28336 `__: DOC: Update link to Anaconda Eclipse/PyDev documentation +* `#28338 `__: MAINT: use OpenBLAS 0.3.29 +* `#28339 `__: MAIN: Update c,c++ line length to 88 +* `#28343 `__: BUG: Fix ``linalg.norm`` to handle empty matrices correctly. 
+* `#28350 `__: DOC: fix typo +* `#28353 `__: DOC: Make numpy.fft a clickable link to module +* `#28355 `__: BUG: safer bincount casting +* `#28358 `__: MAINT: No need to check for check for FPEs in casts to/from object +* `#28359 `__: DOC: Make the first paragraph more concise in internals.rst +* `#28361 `__: BUG: Make np.nonzero threading safe +* `#28370 `__: DOC: Revise bullet point formatting in ``arrays.promotions.rst`` +* `#28382 `__: DOC: fix C API docs for ``PyArray_Size`` +* `#28383 `__: DOC: Added links to CTypes and CFFI in Numba +* `#28386 `__: MAINT: Extend the default ruff exclude files +* `#28387 `__: DOC: fix expected exception from StringDType without string coercion +* `#28390 `__: MAINT: speed up slow test under TSAN +* `#28391 `__: CI: use free-threaded build for ASAN tests +* `#28392 `__: CI: build Linux aarch64 wheels on GitHub Actions +* `#28393 `__: BUG: Fix building on s390x with clang +* `#28396 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28404 `__: MAINT: remove legacy ucsnarrow module +* `#28406 `__: BUG: Include Python.h first +* `#28407 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28408 `__: DOC: Update link to Nix in Cross Compilation +* `#28411 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28413 `__: DOC: add scimath in np.lib submodules listing +* `#28414 `__: DOC: Add missing punctuation to the random sampling page +* `#28415 `__: BLD: update cibuildwheel and build PyPy 3.11 wheels [wheel build] +* `#28421 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28426 `__: BUG: Limit the maximal number of bins for automatic histogram... 
+* `#28427 `__: DOC: remove mention of Poly.nickname +* `#28431 `__: MAINT: PY_VERSION_HEX simplify +* `#28436 `__: BUILD: move to manylinux_2_28 wheel builds +* `#28437 `__: DOC: fix documentation for Flag checking functions and macros +* `#28442 `__: ENH: Check for floating point exceptions in dot +* `#28444 `__: DOC: fix URL redirects +* `#28447 `__: DOC: repositioned bitwise_count under bit-wise operations +* `#28451 `__: DOC: Add -avx512_spr to disable AVX512 in build options +* `#28452 `__: TYP: stub ``random._pickle`` +* `#28453 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28455 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.3 to 2.0.4 +* `#28456 `__: MAINT: Bump actions/cache from 4.2.0 to 4.2.2 +* `#28458 `__: MAINT: Bump actions/upload-artifact from 4.5.0 to 4.6.1 +* `#28459 `__: MAINT: Bump github/codeql-action from 3.27.9 to 3.28.11 +* `#28460 `__: MAINT: Bump astral-sh/setup-uv from 5.2.1 to 5.3.1 +* `#28461 `__: MAINT: Update dependabot.yml file +* `#28462 `__: TYP: Add specializations to meshgrid stubs +* `#28464 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28465 `__: MAINT: Bump ossf/scorecard-action from 2.4.0 to 2.4.1 +* `#28466 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#28467 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.0 to 3.1.1 +* `#28468 `__: MAINT: Bump actions/download-artifact from 4.1.8 to 4.1.9 +* `#28473 `__: DOC: add typing badge to README +* `#28475 `__: MAINT: skip slow_pypy tests on pypy +* `#28477 `__: MAINT: fix typo in normal distribution functions docstrings +* `#28480 `__: ENH: Convert logical from C universal intrinsics to C++ using... +* `#28483 `__: DOC: only change tp_name on CPython +* `#28485 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28488 `__: fix aarch64 CI run +* `#28489 `__: MAINT: Enable building loop_logical on vector length agnostic... 
+* `#28491 `__: TYP: fix typing errors in ``_core.shape_base`` +* `#28492 `__: TYP: fix typing errors in ``_core.strings`` +* `#28494 `__: TYP: fix typing errors in ``_core.records`` +* `#28495 `__: DOC: let docstring mention that unique_values is now unsorted +* `#28497 `__: TYP: don't use literals in shape-types +* `#28498 `__: TYP: accept non-integer shapes in array constructor without a... +* `#28499 `__: TYP: remove unneseccary cast +* `#28500 `__: TYP: stub ``numpy.random._bounded_integers`` +* `#28502 `__: TYP: stub ``numpy.random._common`` +* `#28503 `__: API: Always allow ``sorted=False`` and make a note about it +* `#28505 `__: TYP: stub ``numpy._core.umath`` +* `#28506 `__: TYP: fix typing errors in ``numpy.lib._arrayterator_impl`` +* `#28507 `__: MAINT: remove ``ma.timer_comparison`` +* `#28508 `__: TYP: fix signatures of ``ndarray.put`` and ``ndarray.view`` +* `#28509 `__: TYP: annotate the missing ``ufunc.resolve_dtypes`` method +* `#28511 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28513 `__: TYP: stub ``numpy._core.overrides`` +* `#28514 `__: TYP: stub ``numpy._utils`` +* `#28515 `__: TYP: stub ``numpy._core._dtype[_ctypes]`` +* `#28517 `__: TYP: stub the remaining ``numpy._core.\*`` modules +* `#28518 `__: TYP: stub the missing submodules of ``numpy.linalg`` +* `#28519 `__: TYP: stub ``numpy._pyinstaller`` +* `#28520 `__: TYP: stub ``numpy.fft.helper`` (deprecated) +* `#28522 `__: TYP: stub ``numpy.f2py`` +* `#28523 `__: TYP: annotate the missing deprecated ``row_stack`` function +* `#28524 `__: CI, TST: Bump to cibuildwheel 2.23 (Pyodide 0.27.0) for WASM... 
+* `#28525 `__: TYP: fix stubtest errors in ``numpy.dtype`` and ``numpy.dtypes.\*`` +* `#28526 `__: TYP: fix stubtest errors in ``timedelta64`` and ``object_`` +* `#28527 `__: TYP: fix stubtest errors in ``numpy.lib._function_base_impl`` +* `#28528 `__: TYP: fix stubtest errors in ``numpy.lib._arraysetops_impl`` +* `#28529 `__: TYP: fix stubtest errors in ``numpy.lib._index_tricks_impl`` +* `#28530 `__: TYP: fix stubtest errors in ``numpy.lib._twodim_base_impl`` +* `#28531 `__: ENH: Add Cygwin extensions to list to copy to CWD in f2py meson... +* `#28532 `__: DOC: minor editorial change +* `#28535 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28536 `__: TYP: fix stubtest errors in ``numpy._globals`` +* `#28537 `__: TYP: fix stubtest errors in ``numpy.mat[rix]lib`` +* `#28538 `__: TYP: fix stubtest errors in ``numpy.random`` +* `#28539 `__: TYP: fix stubtest errors in ``numpy.testing`` +* `#28540 `__: TYP: fix typing errors in ``numpy.ndarray`` +* `#28541 `__: TYP: fix stubtest error in ``numpy.ma`` +* `#28546 `__: MAINT: Update main after NumPy 2.2.4 release. 
+* `#28547 `__: MAINT: Bump pypa/cibuildwheel from 2.23.0 to 2.23.1 +* `#28555 `__: MAINT: Bump actions/download-artifact from 4.1.9 to 4.2.0 +* `#28556 `__: NEP 54: Change status to Accepted +* `#28560 `__: MAINT: Bump actions/download-artifact from 4.2.0 to 4.2.1 +* `#28561 `__: MAINT: Bump github/codeql-action from 3.28.11 to 3.28.12 +* `#28562 `__: MAINT: Bump actions/upload-artifact from 4.6.1 to 4.6.2 +* `#28563 `__: MAINT: Bump actions/cache from 4.2.2 to 4.2.3 +* `#28568 `__: MAINT: Bump astral-sh/setup-uv from 5.3.1 to 5.4.0 +* `#28569 `__: Fixing various spelling errors +* `#28571 `__: BLD: use ``manylinux_2_28:2025.03.23-1`` [wheel build] +* `#28576 `__: API,ENH: Allow forcing an array result in ufuncs +* `#28577 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28581 `__: MAINT: Bump github/codeql-action from 3.28.12 to 3.28.13 +* `#28586 `__: MAINT: Bump pypa/cibuildwheel from 2.23.1 to 2.23.2 +* `#28587 `__: MAINT: Bump actions/setup-python from 5.4.0 to 5.5.0 +* `#28591 `__: TYP: Type masked array shape, dtype, __int__, and __float__ +* `#28593 `__: TYP: Type ``numpy.ma.min`` +* `#28600 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28601 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.6.1... 
+* `#28607 `__: CI: fix cirrus config [wheel build] +* `#28611 `__: MAINT: Bump astral-sh/setup-uv from 5.4.0 to 5.4.1 +* `#28612 `__: TYP: Type ``ma.max`` and ``ma.ptp`` +* `#28615 `__: ENH: Upgrade Array API version to 2024.12 +* `#28616 `__: TYP: Type ``ma.MaskedArray.min`` +* `#28617 `__: MAINT: Bump actions/dependency-review-action from 4.5.0 to 4.6.0 +* `#28618 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28619 `__: ENH: Use openmp on x86-simd-sort to speed up np.sort and np.argsort +* `#28621 `__: DOC: Fix typo in ``numpy/typing/__init__.py`` +* `#28623 `__: TYP: Type ``ma.MaskedArray.max`` and ``ma.MaskedArray.ptp`` +* `#28624 `__: BUG: fix ``np.vectorize`` for object dtype +* `#28626 `__: DOC: update array API standard version in compatibility page +* `#28627 `__: MAINT: replace string.format() with f-strings +* `#28635 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28637 `__: TYP: Fix overload for ``ma.MaskedArray.{min,max,ptp}`` and ``ma.{min,max,ptp}`` ... +* `#28638 `__: TYP: Type ``MaskedArray.{argmin, argmax}`` and ``np.ma.{argmin,``... +* `#28643 `__: BUG: fix deepcopying StringDType arrays +* `#28644 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28645 `__: DOC: fixes classes decorated with set_module not showing its... 
+* `#28647 `__: DOC: Fix typos found by codespell +* `#28649 `__: ENH: Improve np.linalg.det performance +* `#28653 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28657 `__: TYP: simplified type-aliases in ``numpy._typing`` +* `#28660 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28662 `__: MAINT: Remove distutils CPU dispatcher compatibility code +* `#28664 `__: TYP: Type ``MaskedArray.sort`` +* `#28666 `__: MAINT: Bump github/codeql-action from 3.28.13 to 3.28.14 +* `#28667 `__: TYP: replace ``_ScalarType`` with ``_SCT`` +* `#28668 `__: TYP: replace ``_ArrayType`` with ``_ArrayT`` +* `#28669 `__: TYP: default to ``dtype[Any]`` +* `#28671 `__: SIMD: Fix Highway QSort symbol linking error on aarch32/ASIMD +* `#28672 `__: MAINT: Bump github/codeql-action from 3.28.14 to 3.28.15 +* `#28674 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28675 `__: TYP: fix and improve ``numpy.lib._type_check_impl`` +* `#28676 `__: TYP: fix mypy test failures +* `#28677 `__: TYP: Type ``MaskedArray.partition`` and ``MaskedArray.argpartition`` +* `#28678 `__: DEP: Deprecate ``.T`` property for non-2dim arrays and scalars +* `#28680 `__: TYP: Type ``MaskedArray.take`` and ``np.ma.take`` +* `#28684 `__: TYP: replace ``_DType`` with ``_DTypeT`` +* `#28688 `__: TYP: rename ``_ShapeType`` TypeVar to ``_ShapeT`` +* `#28689 `__: TYP: Type ``MaskedArray.__{ge,gt,le,lt}__`` +* `#28690 `__: TYP: replace ``_SCT`` with ``_ScalarT`` +* `#28693 `__: BLD: fix meson_version warning +* `#28695 `__: DOC: linalg.matrix_transpose: add alias note +* `#28699 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28702 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28703 `__: MAINT: Improve float16 and float32 printing +* `#28710 `__: ENH: Improve performance for np.result_type +* `#28712 `__: MAINT: ``%i`` → ``%d`` +* `#28715 `__: TYP: Type ``np.ma.{is_masked,ndim,size,ids,iscontiguous}`` +* `#28717 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28719 `__: MAINT: switching from ``%i`` to ``fstrings`` +* `#28720 `__: TYP: drop py310 support +* `#28724 `__: STY: Apply assorted ruff rules (RUF) +* `#28725 `__: STY: Enforce ruff/pycodestyle warnings (W) +* `#28726 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28728 `__: STY: Apply assorted ruff/pyupgrade rules (UP) +* `#28731 `__: BUG: Prevent nanmax/nanmin from copying memmap arrays +* `#28733 `__: TYP: remove ``_typing._UnknownType`` and ``_ArrayLikeUnknown`` +* `#28735 `__: TYP: Type ``MaskedArray.count`` and ``np.ma.count`` +* `#28738 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28739 `__: MNT: get rid of references to Python 3.10 +* `#28740 `__: MAINT: Bump astral-sh/setup-uv from 5.4.1 to 5.4.2 +* `#28741 `__: BUG: Re-enable overriding functions in the ``np.strings`` module. 
+* `#28742 `__: TYP: Type ``MaskedArray.filled`` and ``np.ma.filled`` +* `#28743 `__: MNT: Enforce ruff/pygrep-hooks rules (PGH) +* `#28744 `__: STY: Apply more ruff rules (RUF) +* `#28745 `__: TYP: Type ``MaskedArray.put``\ , ``np.ma.put``\ , ``np.ma.putmask`` +* `#28746 `__: TYP: ``numpy.ma`` squiggly line cleanup +* `#28747 `__: TYP: some ``[arg]partition`` fixes +* `#28748 `__: ENH: Support Python 3.14 +* `#28750 `__: TYP: fix ``count_nonzero`` signature +* `#28751 `__: MNT: discard Python 2 leftover +* `#28752 `__: MNT: Apply ruff/Pylint rule PLW0129 (assertions that never fail) +* `#28754 `__: MNT: Enforce ruff/Pylint Error rules (PLE) +* `#28755 `__: MNT: Apply assorted ruff/Pylint Refactor rules (PLR) +* `#28756 `__: MNT: Apply assorted ruff/Pylint Warning rules (PLW) +* `#28757 `__: BUG: Fix AVX512_SPR dispatching for SVML half-precision operations +* `#28760 `__: STY: Apply ruff/pyupgrade rule UP032 +* `#28763 `__: STY: Use f-string instead of ``format`` call +* `#28764 `__: MNT: Enforce ruff rules: Flynt (FLY) and flake8-pie (PIE) +* `#28765 `__: MNT: Enforce ruff/flake8-bugbear rules (B) +* `#28766 `__: TYP: Type ``MaskedArray.compressed`` and ``np.ma.compressed`` +* `#28768 `__: MAINT: getting rid of old ``%`` and ``.format(...)`` strings... 
+* `#28769 `__: ENH: Improve Floating Point Cast Performance on ARM +* `#28770 `__: MNT: Enforce ruff/pyupgrade rules (UP) +* `#28771 `__: ENH: Include offset in error message when fallocate() fails +* `#28775 `__: STY: Partially apply ruff/pycodestyle rules (E) +* `#28779 `__: MAINT: Update main after Numpy 2.2.5 release +* `#28789 `__: BUG: Re-enable GCC function-specific optimization attributes +* `#28793 `__: TYP: Type ``np.ma.allclose`` and ``np.ma.allequal`` +* `#28798 `__: TST: skip test if spawning threads triggers a RuntimeError +* `#28803 `__: MAINT: Bump github/codeql-action from 3.28.15 to 3.28.16 +* `#28804 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28806 `__: BUG: Fix `` __array__(None)`` to preserve dtype +* `#28807 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28808 `__: CI: Make clang_TSAN CI job use cpython_sanity docker image +* `#28809 `__: TYP: write ``dtype[Any]`` as ``dtype`` +* `#28810 `__: TYP: replace ``_Self`` type parameters with ``typing.Self`` +* `#28811 `__: TYP: remove unnecessary scalar-type ``Any`` type-args +* `#28816 `__: MAINT: Bump actions/setup-python from 5.5.0 to 5.6.0 +* `#28817 `__: MAINT: Bump astral-sh/setup-uv from 5.4.2 to 6.0.0 +* `#28818 `__: MAINT: Bump actions/download-artifact from 4.2.1 to 4.3.0 +* `#28819 `__: TYP: simplify redundant unions of builtin scalar types +* `#28820 `__: TYP: ``None`` at the end of a union +* `#28821 `__: BUG: Use unrotated companion matrix in polynomial.polyroots. 
+* `#28831 `__: TYP: Fix type annotations for ``np.ma.nomask`` and ``np.ma.MaskType`` +* `#28832 `__: TYP: Type ``np.ma.getmask`` +* `#28833 `__: TYP: Type ``np.ma.is_mask`` +* `#28836 `__: ENH: Provide Windows 11 ARM64 wheels (#22530) +* `#28841 `__: BUG: Fix Clang warning in loops_half.dispatch.c.src +* `#28845 `__: TYP: Type ``MaskedArray.nonzero`` +* `#28847 `__: TYP: Use _Array1D alias in ``numpy.ma.core.pyi`` +* `#28848 `__: TYP: Type ``MaskedArray.ravel`` +* `#28849 `__: TYP: Type ``MaskedArray.repeat``\ , improve overloads for ``NDArray.repeat``\... +* `#28850 `__: TYP: Type ``MaskedArray.swapaxes`` +* `#28854 `__: MAINT: Bump pypa/cibuildwheel from 2.23.2 to 2.23.3 +* `#28855 `__: TYP: add missing ``mod`` params to ``__[r]pow__`` +* `#28856 `__: TYP: generic ``StringDType`` +* `#28857 `__: TYP: implicit ``linalg`` private submodule re-exports +* `#28858 `__: TYP: fix the ``set_module`` signature +* `#28859 `__: DOC: Replace http:// with https:// +* `#28860 `__: BLD: update vendored Meson: v1.6.1 and iOS support +* `#28862 `__: BUG: fix stringdtype singleton thread safety +* `#28863 `__: TYP: Improve consistency of (masked) array typing aliases +* `#28867 `__: TYP: Type ``MaskedArray.{__setmask__,mask,harden_mask,soften_mask,hardmask,unsha``... 
+* `#28868 `__: TYP: Type ``MaskedArray.{imag, real, baseclass, mT}`` +* `#28869 `__: MAINT: Bump astral-sh/setup-uv from 6.0.0 to 6.0.1 +* `#28870 `__: MNT: retire old script for SVN repositories +* `#28871 `__: MNT: retire script superseded by ruff rule W605 +* `#28872 `__: DOC: consistent and updated LICENSE files for wheels +* `#28874 `__: DOC: ``numpy.i`` will not be included as part of SWIG +* `#28876 `__: MNT: discard unused function using os.system() +* `#28877 `__: DOC: update content of cross compilation build docs +* `#28878 `__: STY: Enforce more ruff rules +* `#28879 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28880 `__: TYP: Type ``MaskedArray.all`` and ``MaskedArray.any`` +* `#28882 `__: MAINT: address warning in SWIG tests +* `#28883 `__: MAINT: from_dlpack thread safety fixes +* `#28884 `__: DEP: deprecate ``numpy.typing.NBitBase`` +* `#28887 `__: MAINT: Bump github/codeql-action from 3.28.16 to 3.28.17 +* `#28888 `__: DOC: math mode x to \times in docstring for numpy.linalg.multi_dot +* `#28892 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28893 `__: TYP: remove non-existent extended-precision scalar types +* `#28898 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28904 `__: BLD: update vendored Meson to include iOS fix +* `#28905 `__: TYP: Test ``MaskedArray.transpose`` and ``MaskedArray.T``\ ,... +* `#28906 `__: TYP: np.argmin and np.argmax overload changes +* `#28908 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28912 `__: TYP: add ``float64`` overloads to ``{lin,log,geom}space`` +* `#28918 `__: DOC: Fixes absent line numbers on link to classes decorated with... 
+* `#28923 `__: BUG: Use string conversion defined on dtype for .str +* `#28927 `__: MAINT: Remove outdated ``MaskedArray.__div__`` and ``MaskedArray.__idiv__`` +* `#28928 `__: MNT: add support for 3.14.0b1 +* `#28929 `__: MAINT: remove py2 ``__div__`` methods from ``poly1d`` and ``ABCPolyBase`` +* `#28930 `__: MAINT: remove py2 ``__div__`` remnants from the tests +* `#28931 `__: MAINT: remove py2 ``__div__`` methods from ``lib.user_array.container`` +* `#28932 `__: MAINT: remove references to 256-bits extended precision types +* `#28933 `__: MAINT: Use consistent naming for ``numpy/typing/tests/data/fail/ma.pyi`` +* `#28934 `__: TYP, TST: improved type-testing +* `#28935 `__: MAINT: Enable ruff E251 +* `#28936 `__: TST: Prevent import error when tests are not included in the... +* `#28937 `__: CI: fix TSAN CI by using a different docker image +* `#28938 `__: MNT: clean up free-threaded CI configuration +* `#28939 `__: MAINT: Bump actions/dependency-review-action from 4.6.0 to 4.7.0 +* `#28940 `__: TYP: optional type parameters for ``ndarray`` and ``flatiter`` +* `#28941 `__: DOC: Fix titles in ``development_ghcodespaces.rst`` +* `#28945 `__: MAINT: Enable linting with ruff E501 +* `#28952 `__: MAINT: Bump actions/dependency-review-action from 4.7.0 to 4.7.1 +* `#28954 `__: MAINT: Enable linting with ruff E501 for numpy._core +* `#28956 `__: DOC: Remove references to Python 2/3 +* `#28958 `__: TYP: reject complex scalar types in ``ndarray.__ifloordiv__`` +* `#28959 `__: TYP: remove redundant ``ndarray`` inplace operator overloads +* `#28960 `__: TYP: fix mypy & pyright errors in ``np.matrix`` +* `#28961 `__: DEP: finalize removal of ``numpy.compat`` +* `#28962 `__: TYP: type-testing without the mypy plugin +* `#28963 `__: MAINT: Update ruff to 0.11.9 in linting requirements +* `#28969 `__: MNT: Enforce ruff/isort rules (I) +* `#28971 `__: MAINT: Enable linting with ruff E501 +* `#28972 `__: MNT: Get rif of ``# pylint: `` pragma controls +* `#28974 `__: MNT: Get rid 
of ``version: $Id`` CVS tags +* `#28975 `__: MNT: import numpy as np +* `#28976 `__: MNT: Get rid of Pyflakes / flake8 +* `#28977 `__: MNT: Enforce ruff/flake8-implicit-str-concat rules (ISC) +* `#28978 `__: MNT: Enforce ruff/pandas-vet rules (PD) +* `#28981 `__: STY: reformat the ``_typing`` imports without trailing commas +* `#28982 `__: TYP: Gradual shape type defaults +* `#28984 `__: MNT: Use isinstance() instead of comparing type() +* `#28986 `__: TYP: Type ``MaskedArray.__{iadd,isub,imul,itruediv,ifloordiv,ipow}__`` +* `#28987 `__: MNT: Align ruff pin between ``requirements/linter_requirements.txt``... +* `#28988 `__: TYP: add missing ``ndarray.__{add,mul}__`` ``character`` type... +* `#28989 `__: MAINT: Bump github/codeql-action from 3.28.17 to 3.28.18 +* `#28990 `__: Revert "DEP: Deprecate ``.T`` property for non-2dim arrays and... +* `#28993 `__: MAINT: update NPY_FEATURE_VERSION after dropping python 3.10 +* `#28994 `__: TYP: allow inplace division of ``NDArray[timedelta64]`` by floats +* `#28995 `__: TYP: remove ``from __future__ import annotations`` +* `#28998 `__: MAINT: Update main after 2.2.6 release. +* `#29002 `__: MAINT: Update download-wheels for multiple pages +* `#29006 `__: ENH: Disable the alloc cache under address and memory sanitizers +* `#29008 `__: MNT: fix CI issues on main +* `#29018 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29033 `__: BUG: Fix workflow error +* `#29042 `__: MNT: constant string arrays instead of pointers in C +* `#29043 `__: BUG: Avoid compile errors in f2py modules +* `#29044 `__: BUG: Fix f2py derived types in modules +* `#29046 `__: BUG: Fix cache use regression +* `#29047 `__: REL: Prepare for the NumPy 2.3.0rc1 release [wheel build] +* `#29070 `__: TYP: Various typing fixes. 
+* `#29072 `__: MAINT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29073 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29074 `__: BUG: add bounds-checking to in-place string multiply +* `#29082 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel... +* `#29089 `__: MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in... +* `#29099 `__: BUG: f2py: thread-safe forcomb (#29091) +* `#29100 `__: TYP: fix NDArray[integer] inplace operator mypy issue +* `#29101 `__: PERF: Make NpzFile member existence constant time +* `#29116 `__: MAINT: Update to vs2022 in NumPy 2.3.x [wheel build] +* `#29118 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29132 `__: MAINT: Fix for segfaults with GCC 15 + diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst index 74f11a0b4537..faad9ffcc8eb 100644 --- a/doc/source/release/2.3.0-notes.rst +++ b/doc/source/release/2.3.0-notes.rst @@ -4,16 +4,523 @@ NumPy 2.3.0 Release Notes ========================== +The NumPy 2.3.0 release continues the work to improve free threaded Python +support and annotations together with the usual set of bug fixes. It is unusual +in the number of expired deprecations, code modernizations, and style cleanups. +The latter may not be visible to users, but is important for code maintenance +over the long term. Note that we have also upgraded from manylinux2014 to +manylinux_2_28. + +Users running on a Mac having an M4 cpu might see various warnings about +invalid values and such. The warnings are a known problem with Accelerate. +They are annoying, but otherwise harmless. Apple promises to fix them. + +This release supports Python versions 3.11-3.13, Python 3.14 will be supported +when it is released. + Highlights ========== -*We'll choose highlights for this release near the end of the release cycle.* +* Interactive examples in the NumPy documentation. +* Building NumPy with OpenMP Parallelization. 
+* Preliminary support for Windows on ARM. +* Improved support for free threaded Python. +* Improved annotations. + + +New functions +============= + +New function ``numpy.strings.slice`` +------------------------------------ +The new function ``numpy.strings.slice`` was added, which implements fast +native slicing of string arrays. It supports the full slicing API including +negative slice offsets and steps. + +(`gh-27789 `__) + + +Deprecations +============ + +* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic + static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` + section of your mypy configuration. If this change results in new errors being + reported, kindly open an issue. + + (`gh-28129 `__) + +* The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in + a future version. + + This type was previously intended to be used as a generic upper bound for + type-parameters, for example: + + .. code-block:: python + + import numpy as np + import numpy.typing as npt + + def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... + + But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete + subtypes, causing static type-checkers to reject ``x: np.float64 = + f(np.complex128(42j))``. + + So instead, the better approach is to use ``typing.overload``: + + .. code-block:: python + + import numpy as np + from typing import overload + + @overload + def f(x: np.complex64) -> np.float32: ... + @overload + def f(x: np.complex128) -> np.float64: ... + @overload + def f(x: np.clongdouble) -> np.longdouble: ... 
+ + (`gh-28884 `__) + + +Expired deprecations +==================== + +* Remove deprecated macros like ``NPY_OWNDATA`` from Cython interfaces in favor + of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` + in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove alias ``generate_divbyzero_error`` to + ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to + ``npy_set_floatstatus_overflow`` (deprecated since 1.10) + + (`gh-28254 `__) + +* Remove ``np.tostring`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) + + (`gh-28254 `__) + +* Raise when using ``np.bincount(...minlength=None)``, use 0 instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Passing ``shape=None`` to functions with a non-optional shape argument + errors, use ``()`` instead (deprecated since 1.20) + + (`gh-28254 `__) + +* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 1.20) + + (`gh-28254 `__) + +* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) + + (`gh-28254 `__) + +* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they + would guess (deprecated since 1.18) + + (`gh-28254 `__) + +* ``datetime64`` and ``timedelta64`` construction with a tuple no longer + accepts an ``event`` value, either use a two-tuple of (unit, num) or a + 4-tuple of (unit, num, den, 1) (deprecated since 1.14) + + (`gh-28254 `__) + +* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that + attribute must be a dtype-instance rather than a thing that can be parsed as + a dtype instance (deprecated in 1.19). 
At some point the whole construct of + using a dtype attribute will be deprecated (see #25306) + + (`gh-28254 `__) + +* Passing booleans as partition index errors (deprecated since 1.23) + + (`gh-28254 `__) + +* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) + + (`gh-28254 `__) + +* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* Disallow make a non-writeable array writeable for arrays with a base that do + not own their data (deprecated since 1.17) + + (`gh-28254 `__) + +* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, + not ``unsafe`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Unpickling a scalar with object dtype errors (deprecated since 1.20) + + (`gh-28254 `__) + +* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated + since 1.19) + + (`gh-28254 `__) + +* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, + ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since + 1.19) + + (`gh-28254 `__) + +* The Python built-in ``round`` errors for complex scalars. Use ``np.round`` or + ``scalar.round`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) + + (`gh-28254 `__) + +* Parsing an integer via a float string is no longer supported. (deprecated + since 1.23) To avoid this error you can + * make sure the original data is stored as integers. + * use the ``converters=float`` keyword argument. + * Use ``np.loadtxt(...).astype(np.int64)`` + + (`gh-28254 `__) + +* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` + or fill the tuple with ``None`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Special handling of matrix is in np.outer is removed. 
Convert to a ndarray + via ``matrix.A`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Removed the ``np.compat`` package source code (removed in 2.0) + + (`gh-28961 `__) + + +C API changes +============= + +* ``NpyIter_GetTransferFlags`` is now available to check if + the iterator needs the Python API or if casts may cause floating point + errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` + to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). + + (`gh-27883 `__) + +* ``NpyIter`` now has no limit on the number of operands it supports. + + (`gh-28080 `__) + +New ``NpyIter_GetTransferFlags`` and ``NpyIter_IterationNeedsAPI`` change +------------------------------------------------------------------------- +NumPy now has the new ``NpyIter_GetTransferFlags`` function as a more precise +way of checking iterator/buffering needs. I.e. whether the Python API/GIL is +required or floating point errors may occur. +This function is also faster if you already know your needs without buffering. + +The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were +previously performed at setup time. While it was never necessary to call it +multiple times, doing so will now have a larger cost. + +(`gh-27998 `__) + + +New Features +============ + +* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. + This way, static type-checkers will infer ``dtype: np.dtype`` as + ``dtype: np.dtype[Any]``, without reporting an error. + + (`gh-28669 `__) + +* Static type-checkers now interpret: + + - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. + - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. + + This is because their type parameters now have default values.
+ + (`gh-28940 `__) + +NumPy now registers its pkg-config paths with the pkgconf_ PyPI package +----------------------------------------------------------------------- +The pkgconf_ PyPI package provides an interface for projects like NumPy to +register their own paths to be added to the pkg-config search path. This means +that when using pkgconf_ from PyPI, NumPy will be discoverable without needing +any custom environment configuration. + +.. attention:: Attention + + This only applies when using the pkgconf_ package from PyPI_, or put another + way, this only applies when installing pkgconf_ via a Python package + manager. + + If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or + any other source that does not use the pkgconf-pypi_ project, the NumPy + pkg-config directory will not be automatically added to the search path. In + these situations, you might want to use ``numpy-config``. + + +.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi +.. _PyPI: https://pypi.org/ +.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi + +(`gh-28214 `__) + +Allow ``out=...`` in ufuncs to ensure array result +-------------------------------------------------- +NumPy has the sometimes difficult behavior that it currently usually +returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). +This is especially problematic for non-numerical dtypes (e.g. ``object``). + +For ufuncs (i.e. most simple math functions) it is now possible to use +``out=...`` (literally \`...\`, e.g. ``out=Ellipsis``) which is identical in +behavior to ``out`` not being passed, but will ensure a non-scalar return. +This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` also ensures +a non-scalar return. + +Other functions with an ``out=`` kwarg should gain support eventually. +Downstream libraries that interoperate via ``__array_ufunc__`` or +``__array_function__`` may need to adapt to support this.
+ +(`gh-28576 `__) + +Building NumPy with OpenMP Parallelization +------------------------------------------ +NumPy now supports OpenMP parallel processing capabilities when built with the +``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. +When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for +parallel thread execution, improving performance for these operations. + +(`gh-28619 `__) + +Interactive examples in the NumPy documentation +----------------------------------------------- +The NumPy documentation includes a number of examples that +can now be run interactively in your browser using WebAssembly +and Pyodide. + +Please note that the examples are currently experimental in +nature and may not work as expected for all methods in the +public API. + +(`gh-26745 `__) + + +Improvements +============ + +* Scalar comparisons between non-comparable dtypes such as + ``np.array(1) == np.array('s')`` now return a NumPy bool instead of + a Python bool. + + (`gh-27288 `__) + +* ``np.nditer`` now has no limit on the number of supported operands + (C-integer). + + (`gh-28080 `__) + +* No-copy pickling is now supported for any + array that can be transposed to a C-contiguous array. + + (`gh-28105 `__) + +* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the + custom dtype over a more generic name constructed from its ``kind`` and + ``itemsize``. + + (`gh-28250 `__) + +* ``np.dot`` now reports floating point exceptions. + + (`gh-28442 `__) + +* ``np.dtypes.StringDType`` is now a + `generic type `_ which + accepts a type argument for ``na_object`` that defaults to ``typing.Never``. + For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, + and ``StringDType()`` returns a ``StringDType[typing.Never]``. 
+ + (`gh-28856 `__) + +Added warnings to ``np.isclose`` +-------------------------------- +Added warning messages if at least one of atol or rtol is either ``np.nan`` or +``np.inf`` within ``np.isclose``. + +* Warnings follow the user's ``np.seterr`` settings + +(`gh-28205 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` +----------------------------------------- +``np.unique`` now tries to use a hash table to find unique values instead of +sorting values before finding unique values. This is limited to certain dtypes +for now, and the function is now faster for those dtypes. The function now also +exposes a ``sorted`` parameter to allow returning unique values as they were +found, instead of sorting them afterwards. + +(`gh-26018 `__) + +Performance improvements to ``np.sort`` and ``np.argsort`` +---------------------------------------------------------- +``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel +thread execution, resulting in up to 3.5x speedups on x86 architectures with +AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built +with the -Denable_openmp Meson flag. Users can control the number of threads +used by setting the OMP_NUM_THREADS environment variable. + +(`gh-28619 `__) + +Performance improvements for ``np.float16`` casts +------------------------------------------------- +Earlier, floating point casts to and from ``np.float16`` types +were emulated in software on all platforms. + +Now, on ARM devices that support Neon float16 intrinsics (such as +recent Apple Silicon), the native float16 path is used to achieve +the best performance. + +(`gh-28769 `__) + + +Changes +======= + +* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` + now always return zero for empty arrays. Empty arrays have at least one axis + of size zero. 
This affects ``np.linalg.norm``, ``np.linalg.vector_norm``, and + ``np.linalg.matrix_norm``. Previously, NumPy would raise errors or return + zero depending on the shape of the array. + + (`gh-28343 `__) + +* A spelling error in the error message returned when converting a string to a + float with the method ``np.format_float_positional`` has been fixed. + + (`gh-28569 `__) + +* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. + +* ``numpy.count_nonzero`` for ``axis=None`` (default) now returns a NumPy scalar + instead of a Python integer. + +* The parameter ``axis`` in ``numpy.take_along_axis`` function now has a default + value of ``-1``. + + (`gh-28615 `__) + +* Printing of ``np.float16`` and ``np.float32`` scalars and arrays has been improved by + adjusting the transition to scientific notation based on the floating point precision. + A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. + + (`gh-28703 `__) + +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. + + (`gh-29060 `__) + +``unique_values`` may return unsorted data +------------------------------------------ +The relatively new function (added in NumPy 2.0) ``unique_values`` may now +return unsorted results. Just as ``unique_counts`` and ``unique_all`` these +never guaranteed a sorted result, however, the result was sorted until now. In +cases where these do return a sorted result, this may change in future releases +to improve performance. + +(`gh-26018 `__) + +Changes to the main iterator and potential numerical changes +------------------------------------------------------------ +The main iterator, used in math functions and via ``np.nditer`` from Python and +``NpyIter`` in C, now behaves differently for some buffered iterations. 
This +means that: + +* The buffer size used will often be smaller than the maximum buffer size + allowed by the ``buffersize`` parameter. + +* The "growinner" flag is now honored with buffered reductions when no operand + requires buffering. + +For ``np.sum()`` such changes in buffersize may slightly change numerical +results of floating point operations. Users who use "growinner" for custom +reductions could notice changes in precision (for example, in NumPy we removed +it from ``einsum`` to avoid most precision changes and improve precision for +some 64bit floating point inputs). + +(`gh-27883 `__) + +The minimum supported GCC version is now 9.3.0 +---------------------------------------------- +The minimum supported version was updated from 8.4.0 to 9.3.0, primarily in +order to reduce the chance of platform-specific bugs in old GCC versions from +causing issues. + +(`gh-28102 `__) + +Changes to automatic bin selection in numpy.histogram +----------------------------------------------------- +The automatic bin selection algorithm in ``numpy.histogram`` has been modified +to avoid out-of-memory errors for samples with low variation. For full control +over the selected bins the user can set the ``bins`` or ``range`` parameters +of ``numpy.histogram``. + +(`gh-28426 `__) + +Build manylinux_2_28 wheels +--------------------------- +Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the +``manylinux2014`` tag), which means dropping support for redhat7/centos7, +amazonlinux2, debian9, ubuntu18.04, and other pre-glibc2.28 operating system +versions, as per the `PEP 600 support table`_. + +.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check + +(`gh-28436 `__) +Remove use of -Wl,-ld_classic on macOS +-------------------------------------- +Remove use of -Wl,-ld_classic on macOS. 
This hack is no longer needed by Spack, +and results in libraries that cannot link to other libraries built with ld +(new). -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +(`gh-28713 `__) -.. **Content from release note snippets in doc/release/upcoming_changes:** +Re-enable overriding functions in the ``numpy.strings`` +------------------------------------------------------- +Re-enable overriding functions in the ``numpy.strings`` module. -.. include:: notes-towncrier.rst +(`gh-28741 `__) From 3467b99ad3ff2d04e272acb2eb6cc62c78dcc7f7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 7 Jun 2025 23:54:21 +0200 Subject: [PATCH 081/294] TYP: update typing stubs for ``_pyinstaller/hook-numpy.py`` --- numpy/_pyinstaller/hook-numpy.pyi | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/numpy/_pyinstaller/hook-numpy.pyi b/numpy/_pyinstaller/hook-numpy.pyi index 2642996dad7e..6da4914d7e5a 100644 --- a/numpy/_pyinstaller/hook-numpy.pyi +++ b/numpy/_pyinstaller/hook-numpy.pyi @@ -1,13 +1,6 @@ from typing import Final -# from `PyInstaller.compat` -is_conda: Final[bool] -is_pure_conda: Final[bool] +binaries: Final[list[tuple[str, str]]] = ... -# from `PyInstaller.utils.hooks` -def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... - -binaries: Final[list[tuple[str, str]]] - -hiddenimports: Final[list[str]] -excludedimports: Final[list[str]] +hiddenimports: Final[list[str]] = ... +excludedimports: Final[list[str]] = ... From f8204f5e0f338ffad588e56704e596e6678dc4f4 Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Sun, 8 Jun 2025 00:01:43 +0200 Subject: [PATCH 082/294] DOC: Document assertion comparison behavior between scalar and empty array Noted in #27457. The minimal thing we should do is document this behavior. While at it, I slightly homogenized the notes. 
--- numpy/testing/_private/utils.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index d7ceaeab72cc..9bc60d28b160 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -293,9 +293,10 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): Notes ----- - By default, when one of `actual` and `desired` is a scalar and the other is - an array, the function checks that each element of the array is equal to - the scalar. This behaviour can be disabled by setting ``strict==True``. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -980,9 +981,10 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Notes ----- - When one of `actual` and `desired` is a scalar and the other is array_like, - the function checks that each element of the array_like object is equal to - the scalar. This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -1651,10 +1653,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Notes ----- - When one of `actual` and `desired` is a scalar and the other is - array_like, the function performs the comparison as if the scalar were - broadcasted to the shape of the array. - This behaviour can be disabled with the `strict` parameter. 
+ When one of `actual` and `desired` is a scalar and the other is array_like, the + function performs the comparison as if the scalar were broadcasted to the shape + of the array. Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- From 7bf85704f2e52589ce18b5124e29778227a36feb Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 02:40:31 +0200 Subject: [PATCH 083/294] TYP: add missing ``numpy.lib`` exports --- numpy/lib/__init__.pyi | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 8532ef8d9fb9..6185a494d035 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,15 +1,30 @@ from numpy._core.function_base import add_newdoc from numpy._core.multiarray import add_docstring, tracemalloc_domain -from . import ( # noqa: F401 - array_utils, - format, - introspect, - mixins, - npyio, - scimath, - stride_tricks, -) +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . import _array_utils_impl as _array_utils_impl +from . import _arraypad_impl as _arraypad_impl +from . import _arraysetops_impl as _arraysetops_impl +from . import _arrayterator_impl as _arrayterator_impl +from . import _datasource as _datasource +from . import _format_impl as _format_impl +from . import _function_base_impl as _function_base_impl +from . import _histograms_impl as _histograms_impl +from . import _index_tricks_impl as _index_tricks_impl +from . import _iotools as _iotools +from . import _nanfunctions_impl as _nanfunctions_impl +from . import _npyio_impl as _npyio_impl +from . import _polynomial_impl as _polynomial_impl +from . import _scimath_impl as _scimath_impl +from . import _shape_base_impl as _shape_base_impl +from . import _stride_tricks_impl as _stride_tricks_impl +from . 
import _twodim_base_impl as _twodim_base_impl +from . import _type_check_impl as _type_check_impl +from . import _ufunclike_impl as _ufunclike_impl +from . import _utils_impl as _utils_impl +from . import _version as _version +from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks from ._arrayterator_impl import Arrayterator from ._version import NumpyVersion @@ -18,6 +33,7 @@ __all__ = [ "add_docstring", "add_newdoc", "array_utils", + "format", "introspect", "mixins", "NumpyVersion", From 128ab7f72ae83735dca2f27d2682503915e9d578 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 03:35:16 +0200 Subject: [PATCH 084/294] TYP: add ``containsderivedtypes`` to ``f2py.auxfuncs.__all__`` --- numpy/f2py/auxfuncs.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 1212f229c660..f2ff09faf33b 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -10,6 +10,7 @@ from .cfuncs import errmess __all__ = [ "applyrules", "containscommon", + "containsderivedtypes", "debugcapi", "dictappend", "errmess", @@ -200,7 +201,7 @@ def isintent_inplace(var: _Var) -> bool: ... def isintent_aux(var: _Var) -> bool: ... # -def containsderivedtypes(rout: _ROut) -> _Bool: ... +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... def containscommon(rout: _ROut) -> _Bool: ... def hasexternals(rout: _ROut) -> bool: ... def hasresultnote(rout: _ROut) -> _Bool: ... 
From cd538067a4c6747aeddd68e5b0df9543da49a21c Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 03:37:24 +0200 Subject: [PATCH 085/294] TYP: remove ``run_command`` annotations in ``f2py.diagnose`` --- numpy/f2py/diagnose.pyi | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/f2py/diagnose.pyi b/numpy/f2py/diagnose.pyi index 29cc2b4988b3..b88194ac6bff 100644 --- a/numpy/f2py/diagnose.pyi +++ b/numpy/f2py/diagnose.pyi @@ -1,4 +1 @@ -from _typeshed import StrOrBytesPath - -def run_command(cmd: StrOrBytesPath) -> None: ... def run() -> None: ... From 61512bcef563f799dafff6266631985764386ffa Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 04:10:59 +0200 Subject: [PATCH 086/294] BUG: Missing array-api ``capabilities()`` key --- numpy/_array_api_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 6ea9e13587f4..067e38798718 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -94,14 +94,14 @@ def capabilities(self): >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, - 'data-dependent shapes': True} + 'data-dependent shapes': True, + 'max dimensions': 64} """ return { "boolean indexing": True, "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, + "max dimensions": 64, } def default_device(self): From 21c2e707dad89fb2498040c4badf231882f0404d Mon Sep 17 00:00:00 2001 From: V-R-S Date: Sun, 8 Jun 2025 17:02:03 +0000 Subject: [PATCH 087/294] TST: migrating from pytz to zoneinfo + tzdata (where needed) For migration from pytz to zoneinfo function get_tzoffset_from_pytzinfo from numpy/_core/src/multiarray/datetime.c is modified to use astimezone instead of fromutc. As the object ZoneInfo is not directly compatible to be used with datetime object. Hence, something like this would result in an exception. 
from datetime import datetime from zoneinfo import ZoneInfo a = datetime(2025, 6, 7, 10, 0, 0) zoneInfo = ZoneInfo("US/Central") b = zoneInfo.fromutc(a) ValueError: fromutc: dt.tzinfo is not self The function astimezone can be used with both pytz.timezone object and zoneinfo.ZoneInfo object But, if we want to use the datetime object consistently we cannot let it be a naive type i.e. without a timezone. As the default behaviour of astimezone would take the system timezone if the datetime object is not timezone aware. Hence, I had to also change the call to create datetime object to take UTC timezone. See #29064 --- environment.yml | 2 +- numpy/_core/src/multiarray/_datetime.h | 4 ++-- numpy/_core/src/multiarray/datetime.c | 10 +++++----- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 3 ++- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/environment.yml b/environment.yml index d2964bf78368..5f1ee5e81a5f 100644 --- a/environment.yml +++ b/environment.yml @@ -49,4 +49,4 @@ dependencies: - gitpython # Used in some tests - cffi - - pytz + - tzdata diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index dd25e1ffd6cc..112c57433094 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -174,8 +174,8 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, npy_bool from_pickle); /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. 
*/ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone, npy_datetimestruct *dts); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 9c024dbcd91c..d820474532ca 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2245,8 +2245,8 @@ NpyDatetime_ConvertPyDateTimeToDatetimeStruct( } /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. */ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) @@ -2255,14 +2255,14 @@ get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) npy_datetimestruct loc_dts; /* Create a Python datetime to give to the timezone object */ - dt = PyDateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, - dts->hour, dts->min, 0, 0); + dt = PyDateTimeAPI->DateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, + dts->hour, dts->min, 0, 0, PyDateTime_TimeZone_UTC, PyDateTimeAPI->DateTimeType); if (dt == NULL) { return -1; } /* Convert the datetime from UTC to local time */ - loc_dt = PyObject_CallMethod(timezone_obj, "fromutc", "O", dt); + loc_dt = PyObject_CallMethod(dt, "astimezone", "O", timezone_obj); Py_DECREF(dt); if (loc_dt == NULL) { return -1; diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 18cfb219034d..019a69da687a 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ hypothesis==6.81.1 pytest==7.4.0 -pytz==2023.3.post1 +tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 4fb1d47bf50d..17260753db4a 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -4,7 +4,6 @@ 
setuptools==65.5.1 ; python_version < '3.12' setuptools ; python_version >= '3.12' hypothesis==6.104.1 pytest==7.4.0 -pytz==2023.3.post1 pytest-cov==4.1.0 meson ninja; sys_platform != "emscripten" @@ -17,3 +16,5 @@ mypy==1.16.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer +tzdata + From e48fdef5d7bf8ba2f90972c531cdfa07481ae535 Mon Sep 17 00:00:00 2001 From: David Seifert Date: Sun, 8 Jun 2025 19:24:04 +0200 Subject: [PATCH 088/294] BUG: remove `NPY_ALIGNMENT_REQUIRED` * This machinery requires strict-aliasing UB and isn't needed anymore with any GCC from the last 15 years. This might also fix #25004. Fixes: #28991 --- numpy/_core/include/numpy/npy_cpu.h | 12 ------------ numpy/_core/src/multiarray/common.h | 15 ++++----------- numpy/_core/src/multiarray/compiled_base.c | 14 +++++--------- numpy/_core/src/multiarray/item_selection.c | 7 +++++-- .../src/multiarray/lowlevel_strided_loops.c.src | 6 +----- 5 files changed, 15 insertions(+), 39 deletions(-) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 72f7331a0267..52e9d5996bd1 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -120,16 +120,4 @@ information about your platform (OS, CPU and compiler) #endif -/* - * Except for the following architectures, memory access is limited to the natural - * alignment of data types otherwise it may lead to bus error or performance regression. - * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. 
-*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) - #define NPY_ALIGNMENT_REQUIRED 0 -#endif -#ifndef NPY_ALIGNMENT_REQUIRED - #define NPY_ALIGNMENT_REQUIRED 1 -#endif - #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index e356b8251931..a18f74bda71a 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -11,6 +11,7 @@ #include "npy_static_data.h" #include "npy_import.h" #include +#include #ifdef __cplusplus extern "C" { @@ -230,15 +231,6 @@ npy_uint_alignment(int itemsize) * compared to memchr it returns one stride past end instead of NULL if needle * is not found. */ -#ifdef __clang__ - /* - * The code below currently makes use of !NPY_ALIGNMENT_REQUIRED, which - * should be OK but causes the clang sanitizer to warn. It may make - * sense to modify the code to avoid this "unaligned" access but - * it would be good to carefully check the performance changes. 
- */ - __attribute__((no_sanitize("alignment"))) -#endif static inline char * npy_memchr(char * haystack, char needle, npy_intp stride, npy_intp size, npy_intp * psubloopsize, int invert) @@ -259,11 +251,12 @@ npy_memchr(char * haystack, char needle, } else { /* usually find elements to skip path */ - if (!NPY_ALIGNMENT_REQUIRED && needle == 0 && stride == 1) { + if (needle == 0 && stride == 1) { /* iterate until last multiple of 4 */ char * block_end = haystack + size - (size % sizeof(unsigned int)); while (p < block_end) { - unsigned int v = *(unsigned int*)p; + unsigned int v; + memcpy(&v, p, sizeof(v)); if (v != 0) { break; } diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 86b60cf75944..fee0d4a61a78 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1620,19 +1620,15 @@ pack_inner(const char *inptr, bb[1] = npyv_tobits_b8(npyv_cmpneq_u8(v1, v_zero)); bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); - if(out_stride == 1 && - (!NPY_ALIGNMENT_REQUIRED || isAligned)) { - npy_uint64 *ptr64 = (npy_uint64*)outptr; + if(out_stride == 1 && isAligned) { #if NPY_SIMD_WIDTH == 16 - npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); - ptr64[0] = bcomp; + npy_uint64 arr[1] = {bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48)}; #elif NPY_SIMD_WIDTH == 32 - ptr64[0] = bb[0] | (bb[1] << 32); - ptr64[1] = bb[2] | (bb[3] << 32); + npy_uint64 arr[2] = {bb[0] | (bb[1] << 32), bb[2] | (bb[3] << 32)}; #else - ptr64[0] = bb[0]; ptr64[1] = bb[1]; - ptr64[2] = bb[2]; ptr64[3] = bb[3]; + npy_uint64 arr[4] = {bb[0], bb[1], bb[2], bb[3]}; #endif + memcpy(outptr, arr, sizeof(arr)); outptr += vstepx4; } else { for(int i = 0; i < 4; i++) { diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index d2db10633810..5c036b704774 100644 --- 
a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -4,6 +4,7 @@ #define PY_SSIZE_T_CLEAN #include #include +#include #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -2525,11 +2526,13 @@ count_nonzero_u8(const char *data, npy_intp bstride, npy_uintp len) len -= len_m; count = len_m - zcount; #else - if (!NPY_ALIGNMENT_REQUIRED || npy_is_aligned(data, sizeof(npy_uint64))) { + if (npy_is_aligned(data, sizeof(npy_uint64))) { int step = 6 * sizeof(npy_uint64); int left_bytes = len % step; for (const char *end = data + len; data < end - left_bytes; data += step) { - count += count_nonzero_bytes_384((const npy_uint64 *)data); + npy_uint64 arr[6]; + memcpy(arr, data, step); + count += count_nonzero_bytes_384(arr); } len = left_bytes; } diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 01ffd225274f..0c4eb3dd9a8d 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -33,11 +33,7 @@ * instructions (16 byte). * So this flag can only be enabled if autovectorization is disabled. */ -#if NPY_ALIGNMENT_REQUIRED -# define NPY_USE_UNALIGNED_ACCESS 0 -#else -# define NPY_USE_UNALIGNED_ACCESS 0 -#endif +#define NPY_USE_UNALIGNED_ACCESS 0 #define _NPY_NOP1(x) (x) #define _NPY_NOP2(x) (x) From f0e082de06ee5ce1ad120a91a5d3053b1a6032d9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 8 Jun 2025 19:24:05 +0200 Subject: [PATCH 089/294] DOC: Document the removal of the NPY_ALIGNMENT_REQUIRED macro. 
--- doc/release/upcoming_changes/29094.compatibility.rst | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 doc/release/upcoming_changes/29094.compatibility.rst diff --git a/doc/release/upcoming_changes/29094.compatibility.rst b/doc/release/upcoming_changes/29094.compatibility.rst new file mode 100644 index 000000000000..961ee6504dae --- /dev/null +++ b/doc/release/upcoming_changes/29094.compatibility.rst @@ -0,0 +1,7 @@ +The Macro NPY_ALIGNMENT_REQUIRED has been removed +------------------------------------------------- +The macro was defined in the `npy_cpu.h` file, so might be regarded as +semipublic. As it turns out, with modern compilers and hardware it is almost +always the case that alignment is required, so numpy no longer uses the macro. +It is unlikely anyone uses it, but you might want to compile with the `-Wundef` +flag or equivalent to be sure. From da3c2b43453786194f04b7c1253794f19e54256e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Mon, 9 Jun 2025 12:02:26 -0300 Subject: [PATCH 090/294] DOC: Remove version switcher colors [skip actions][skip azp][skip cirrus] --- doc/source/_static/numpy.css | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 9df2f6c546c5..d086215dd638 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -22,18 +22,6 @@ body { /* Version switcher colors from PyData Sphinx Theme */ -.version-switcher__button[data-active-version-name*="devdocs"] { - background-color: var(--pst-color-warning); - border-color: var(--pst-color-warning); - opacity: 0.9; -} - -.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { - background-color: var(--pst-color-danger); - border-color: var(--pst-color-danger); - opacity: 0.9; -} - .version-switcher__menu a.list-group-item { font-size: small; } From 
a8d35009a6a3294423448b63233870f128b7002a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Mon, 9 Jun 2025 12:14:18 -0300 Subject: [PATCH 091/294] MAINT: Update comment for clarity --- doc/source/_static/numpy.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index d086215dd638..1555dafb5539 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -20,7 +20,7 @@ body { width: 15%; } -/* Version switcher colors from PyData Sphinx Theme */ +/* Version switcher from PyData Sphinx Theme */ .version-switcher__menu a.list-group-item { font-size: small; From aed7a608f579511e60bd036119990e4035cbe916 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 9 Jun 2025 17:14:19 +0200 Subject: [PATCH 092/294] CI: Run mypy with Python 3.13 --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 36e89504def7..81fa57239b9b 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -46,9 +46,9 @@ jobs: fail-fast: false matrix: os_python: + - [macos-latest, '3.13'] - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] - - [macos-latest, '3.11'] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: From 7d950ec5525fbdaf8b9752364bf52de3cd127436 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 9 Jun 2025 22:18:29 +0300 Subject: [PATCH 093/294] DOC: tweak release walkthrough for numpy.org news blurb [skip actions][skip azp][skip cirrus] --- doc/RELEASE_WALKTHROUGH.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 702803172477..6d2194b5c4e6 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -344,6 +344,8 @@ This assumes that you have forked ``_:: to the previous links for example. 
- For the ``*.0`` release in a cycle, add a new section at the top with a short description of the new features and point the news link to it. +- Edit the newsHeader and date fields at the top of news.md +- Also edit the butttonText on line 14 in content/en/config.yaml commit and push:: From 81b2a67fac4782498796d4012208593640faa5ec Mon Sep 17 00:00:00 2001 From: V-R-S Date: Tue, 10 Jun 2025 11:40:25 +0000 Subject: [PATCH 094/294] Adding refactored tests . . --- numpy/_core/tests/test_datetime.py | 27 ++++++++++---------------- numpy/_core/tests/test_deprecations.py | 6 ------ 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 1cbacb8a26a8..a452259ec5c9 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,6 @@ import datetime import pickle +from zoneinfo import ZoneInfo import pytest @@ -16,13 +17,6 @@ suppress_warnings, ) -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False - try: RecursionError except NameError: @@ -1886,7 +1880,6 @@ def test_datetime_as_string(self): np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') - @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' a = np.datetime64('2010-03-15T06:30', 'm') @@ -1901,29 +1894,29 @@ def test_datetime_as_string_timezone(self): b = np.datetime64('2010-02-15T06:30', 'm') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Central')), '2010-03-15T01:30-0500') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Eastern')), '2010-03-15T02:30-0400') - assert_equal(np.datetime_as_string(a, 
timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Pacific')), '2010-03-14T23:30-0700') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Central')), '2010-02-15T00:30-0600') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Eastern')), '2010-02-15T01:30-0500') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Pacific')), '2010-02-14T22:30-0800') # Dates to strings with a timezone attached is disabled by default assert_raises(TypeError, np.datetime_as_string, a, unit='D', - timezone=tz('US/Pacific')) + timezone=ZoneInfo('US/Pacific')) # Check that we can print out the date in the specified time zone assert_equal(np.datetime_as_string(a, unit='D', - timezone=tz('US/Pacific'), casting='unsafe'), + timezone=ZoneInfo('US/Pacific'), casting='unsafe'), '2010-03-14') assert_equal(np.datetime_as_string(b, unit='D', - timezone=tz('US/Central'), casting='unsafe'), + timezone=ZoneInfo('US/Central'), casting='unsafe'), '2010-02-15') def test_datetime_arange(self): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index d90c15565c22..cb552357fc96 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -13,12 +13,6 @@ import numpy as np from numpy.testing import assert_raises, temppath -try: - import pytz # noqa: F401 - _has_pytz = True -except ImportError: - _has_pytz = False - class _DeprecationTestCase: # Just as warning: warnings uses re.match, so the start of this message From 0f1a6f81d3e2684802f59cd0d3da5b4ceac770f3 Mon Sep 17 00:00:00 2001 From: V-R-S Date: Tue, 10 Jun 2025 14:44:48 +0000 Subject: [PATCH 095/294] Removing additional references to pytz . . 
--- .github/workflows/linux.yml | 2 +- numpy/_core/multiarray.py | 6 +++--- numpy/_core/tests/test_datetime.py | 8 +++++++- requirements/doc_requirements.txt | 2 -- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 668c1191d055..a0e549d86775 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -271,7 +271,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pytz pandas + # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pandas # spin check-docs -v # spin check-tutorials -v diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 236ca7e7c9aa..5599494720b6 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -1723,7 +1723,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- >>> import numpy as np - >>> import pytz + >>> from zoneinfo import ZoneInfo >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', @@ -1736,9 +1736,9 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + >>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern')) array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='=1.8.0 # interactive documentation utilities From 29c85f376bc41095cdc0c7060bb38605e34e040b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 19:58:40 +0200 Subject: [PATCH 096/294] TYP: Accept dispatcher function with optional returns in ``_core.overrides`` (#29171) Co-authored-by: Sebastian Berg --- numpy/_core/overrides.pyi | 23 ++++++++++------------- 1 file changed, 10 
insertions(+), 13 deletions(-) diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 05453190efd4..91d624203e81 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,11 +1,11 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple, ParamSpec, TypeVar - -from numpy._typing import _SupportsArrayFunc +from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar _T = TypeVar("_T") _Tss = ParamSpec("_Tss") -_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) +_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) + +_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] ### @@ -18,14 +18,11 @@ class ArgSpec(NamedTuple): keywords: str | None defaults: tuple[Any, ...] -def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... +def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... # -def verify_matching_signatures( - implementation: Callable[_Tss, object], - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], -) -> None: ... +def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... # NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks @@ -33,11 +30,11 @@ def verify_matching_signatures( # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. 
def array_function_dispatch( - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + dispatcher: _Dispatcher[_Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncT], _FuncT]: ... +) -> Callable[[_FuncLikeT], _FuncLikeT]: ... # def array_function_from_dispatcher( @@ -45,4 +42,4 @@ def array_function_from_dispatcher( module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... +) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... From a17726a2d6ce58d137a68e13711b6fb3e98989bb Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:23:19 +0200 Subject: [PATCH 097/294] TYP: ``lib._iotools`` annotation improvements (#29177) --- numpy/lib/_iotools.pyi | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 21cfc3b19503..82275940e137 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -13,11 +13,12 @@ from typing import ( import numpy as np import numpy.typing as npt +from numpy._typing._dtype_like import _DTypeLikeNested _T = TypeVar("_T") @type_check_only -class _ValidationKwargs(TypedDict, total=False): +class _NameValidatorKwargs(TypedDict, total=False): excludelist: Iterable[str] | None deletechars: Iterable[str] | None case_sensitive: Literal["upper", "lower"] | bool | None @@ -25,7 +26,7 @@ class _ValidationKwargs(TypedDict, total=False): ### -__docformat__: Final[str] = "restructuredtext en" +__docformat__: Final = "restructuredtext en" class ConverterError(Exception): ... class ConverterLockError(ConverterError): ... @@ -98,17 +99,18 @@ class StringConverter: @classmethod def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... +def _decode_line(line: str | bytes, encoding: str) -> str: ... 
+def _is_string_like(obj: object) -> bool: ... +def _is_bytes_like(obj: object) -> bool: ... +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... @overload def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... @overload def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... - -# -def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... -def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... def easy_dtype( - ndtype: npt.DTypeLike, - names: Iterable[str] | None = None, + ndtype: str | Sequence[_DTypeLikeNested], + names: str | Sequence[str] | None = None, defaultfmt: str = "f%i", - **validationargs: Unpack[_ValidationKwargs], + **validationargs: Unpack[_NameValidatorKwargs], ) -> np.dtype[np.void]: ... From 9cbddda01babcc67b9aa72e707ea0320cf0ad22f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:27:11 +0200 Subject: [PATCH 098/294] TYP: ``any(None)`` and ``all(None)`` (#29176) --- numpy/_core/fromnumeric.pyi | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 050eb9f75c40..f0f83093c3b1 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -819,9 +819,10 @@ def sum( where: _ArrayLikeBool_co = ..., ) -> _ArrayT: ... +# keep in sync with `any` @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -830,7 +831,7 @@ def all( ) -> np.bool: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -839,7 +840,7 @@ def all( ) -> Incomplete: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] 
| None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -848,7 +849,7 @@ def all( ) -> _ArrayT: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -856,9 +857,10 @@ def all( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# keep in sync with `all` @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -867,7 +869,7 @@ def any( ) -> np.bool: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -876,7 +878,7 @@ def any( ) -> Incomplete: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -885,7 +887,7 @@ def any( ) -> _ArrayT: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -893,6 +895,7 @@ def any( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
+# @overload def cumsum( a: _ArrayLike[_ScalarT], From 42b875f72a44beb6acf19b6b39f2628c0cf6599a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:28:32 +0200 Subject: [PATCH 099/294] TYP: Fix invalid inline annotations in ``lib._function_base_impl`` (#29175) * TYP: Fix invalid inline annotations in ``lib._function_base_impl`` * TYP: ``ruff check --fix`` * TYP: prevent inline annotation from causing a circular import error --- numpy/lib/_function_base_impl.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 63346088b6e2..f217af64fb4b 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4711,14 +4711,14 @@ def _inverted_cdf(n, quantiles): def _quantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, - axis: int | None = None, - out=None, - overwrite_input: bool = False, - method="linear", -) -> np.array: + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray, + axis: int | None = None, + out: np.ndarray | None = None, + overwrite_input: bool = False, + method: str = "linear", +) -> np.ndarray: if q.ndim > 2: # The code below works fine for nd, but it might not have useful # semantics. For now, keep the supported dimensions the same as it was @@ -4784,13 +4784,13 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): def _quantile( - arr: np.array, - quantiles: np.array, - axis: int = -1, - method="linear", - out=None, - weights=None, -): + arr: "np.typing.ArrayLike", + quantiles: np.ndarray, + axis: int = -1, + method: str = "linear", + out: np.ndarray | None = None, + weights: "np.typing.ArrayLike | None" = None, +) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. 
These methods are extended to this function using _ureduce From 1512865bea80182f360318eb471335326897b707 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:31:11 +0200 Subject: [PATCH 100/294] TYP: ``numpy._NoValue`` (#29170) --- numpy/__init__.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 41d7411dfdd8..ae26ac2f19f8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -432,6 +432,8 @@ from numpy._core.shape_base import ( ) from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue from numpy.lib import ( scimath as emath, @@ -495,8 +497,6 @@ from numpy.lib._function_base_impl import ( quantile, ) -from numpy._globals import _CopyMode - from numpy.lib._histograms_impl import ( histogram_bin_edges, histogram, From 7d8b1d5d49b6be5bd47ce2a2016283ffc5b43f0d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:33:36 +0200 Subject: [PATCH 101/294] TYP: ``out=...`` in ufuncs (#29169) --- numpy/_typing/_ufunc.pyi | 179 ++++++++++++++++++++++----------------- 1 file changed, 99 insertions(+), 80 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 104307da89db..790149d9c7fb 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,8 +4,9 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. 
-""" +""" # noqa: PYI021 +from types import EllipsisType from typing import ( Any, Generic, @@ -102,8 +103,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + /, + out: EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -115,8 +117,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -128,8 +131,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _SupportsArrayUFunc, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: _SupportsArrayUFunc, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -176,7 +180,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = None, + out: EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -185,9 +189,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def __call__( self, x1: ArrayLike, - x2: NDArray[np.generic], + x2: NDArray[Any], /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -195,10 +199,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload # (array, array-like) -> array def __call__( self, - x1: 
NDArray[np.generic], + x1: NDArray[Any], x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -209,7 +213,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: NDArray[Any] | tuple[NDArray[Any]], *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -220,7 +224,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -239,7 +243,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., keepdims: bool = ..., initial: Any = ..., where: _ArrayLikeBool_co = ..., @@ -250,7 +254,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... def reduceat( @@ -259,7 +263,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... 
@overload # (scalar, scalar) -> scalar @@ -269,7 +273,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: _ScalarLike_co, /, *, - out: None = None, + out: EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... @@ -277,21 +281,21 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def outer( self, A: ArrayLike, - B: NDArray[np.generic], + B: NDArray[Any], /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @overload # (array, array-like) -> array def outer( self, - A: NDArray[np.generic], + A: NDArray[Any], B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @@ -302,7 +306,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: NDArray[Any] | tuple[NDArray[Any]], dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @@ -313,7 +317,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any] | Any: ... 
@@ -340,10 +344,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = ..., + /, *, + out: EllipsisType | None = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -354,11 +360,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -369,11 +376,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _SupportsArrayUFunc, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: _SupportsArrayUFunc, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -410,11 +418,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = ..., + /, *, + out: EllipsisType | None = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -425,12 +435,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, 
_NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + x2: ArrayLike, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -468,9 +479,10 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None = ..., + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = ..., *, casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -482,9 +494,10 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]], + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType, *, casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -556,7 +569,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co: ... @overload @@ -564,7 +577,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co | NDArray[np.object_]: ... 
@overload @@ -580,7 +593,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> Any: ... @@ -611,7 +624,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -620,7 +633,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -638,7 +651,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _SupportsArrayUFunc, x2: _SupportsArrayUFunc | ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @overload @@ -647,7 +660,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... 
@@ -656,11 +669,11 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno @overload def reduce( self, + /, array: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - /, keepdims: bool = ..., initial: _ScalarLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -685,7 +698,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., *, keepdims: Literal[True], initial: _ScalarLike_co = ..., @@ -698,7 +711,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., keepdims: bool = ..., initial: _ScalarLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -707,12 +720,12 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno @overload def reduceat( self, + /, array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex, dtype: DTypeLike, out: _ArrayT, - /, ) -> _ArrayT: ... @overload def reduceat( @@ -733,7 +746,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload def reduceat( @@ -743,21 +756,22 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., ) -> Any: ... @overload def accumulate( self, + /, array: ArrayLike, axis: SupportsIndex, dtype: DTypeLike, out: _ArrayT, - /, ) -> _ArrayT: ... 
@overload def accumulate( self, + /, array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., @@ -771,7 +785,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload @@ -779,8 +793,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -788,8 +803,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -797,7 +813,8 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, + /, + *, out: _ArrayT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ArrayT: ... @@ -806,8 +823,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _SupportsArrayUFunc, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @overload @@ -815,8 +833,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _ScalarLike_co, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... 
@@ -841,7 +860,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co: ... @overload @@ -852,7 +871,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -874,7 +893,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> Any: ... @@ -903,7 +922,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co]: ... @overload @@ -912,7 +931,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... @overload @@ -930,7 +949,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: _2PTuple[NDArray[Any]] | None = ..., + out: _2PTuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> Any: ... 
From 7bb70e10fa25d61c6328f6fecff41be647de7cf3 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:36:46 +0200 Subject: [PATCH 102/294] TYP: Simplified ``dtype.__new__`` overloads (#29168) --- numpy/__init__.pyi | 271 ++++++++++++----------- numpy/typing/tests/data/reveal/dtype.pyi | 19 +- 2 files changed, 151 insertions(+), 139 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ae26ac2f19f8..0801e97b7061 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -866,8 +866,6 @@ _SignedIntegerCType: TypeAlias = type[ ] # fmt: skip _FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] _IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType -_NumberCType: TypeAlias = _IntegerCType -_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]] # some commonly used builtin types that are known to result in a # `dtype[object_]`, when their *type* is passed to the `dtype` constructor @@ -1067,10 +1065,6 @@ class _SupportsFileMethods(SupportsFlush, Protocol): @type_check_only class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... -@type_check_only -class _SupportsItem(Protocol[_T_co]): - def item(self, /) -> _T_co: ... - @type_check_only class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... @@ -1177,7 +1171,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[float64] | None, + dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ... 
@@ -1207,36 +1201,31 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[builtins.bool | np.bool], + dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[np.bool]: ... - # NOTE: `_: type[int]` also accepts `type[int | bool]` @overload def __new__( cls, - dtype: type[int | int_ | np.bool], + dtype: type[int], # also accepts `type[builtins.bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[int_ | np.bool]: ... - # NOTE: `_: type[float]` also accepts `type[float | int | bool]` - # NOTE: `float64` inherits from `float` at runtime; but this isn't - # reflected in these stubs. So an explicit `float64` is required here. @overload def __new__( cls, - dtype: type[float | float64 | int_ | np.bool] | None, + dtype: type[float], # also accepts `type[int | bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[float64 | int_ | np.bool]: ... 
- # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]` @overload def __new__( cls, - dtype: type[complex | complex128 | float64 | int_ | np.bool], + dtype: type[complex], # also accepts `type[float | int | bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1244,7 +1233,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[bytes], # also includes `type[bytes_]` + dtype: type[bytes | ct.c_char] | _BytesCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1252,7 +1241,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[str], # also includes `type[str_]` + dtype: type[str] | _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1266,7 +1255,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[memoryview | void], + dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1276,127 +1265,182 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[_BuiltinObjectLike | object_], + dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... - # Unions of builtins. + # `unsignedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: type[bytes | str], + dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[character]: ... + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint8]: ... 
@overload def __new__( cls, - dtype: type[bytes | str | memoryview], + dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[flexible]: ... + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint16]: ... @overload def __new__( cls, - dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike], + dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ... - - # `unsignedinteger` string-based representations and ctypes - @overload - def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... - @overload - def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... - @overload - def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... - @overload - def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... - @overload - def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... - @overload - def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... 
- @overload - def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... - # NOTE: We're assuming here that `uint_ptr_t == size_t`, - # an assumption that does not hold in rare cases (same for `ssize_t`) + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint32]: ... @overload - def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + def __new__( + cls, + dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint64]: ... @overload - def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + def __new__( + cls, + dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uintp]: ... @overload - def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + def __new__( + cls, + dtype: _ULongCodes | type[ct.c_ulong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[ulong]: ... # `signedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... 
- @overload - def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... - @overload - def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... - @overload - def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... - @overload - def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + def __new__( + cls, + dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int8]: ... @overload - def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + def __new__( + cls, + dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int16]: ... @overload - def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... + def __new__( + cls, + dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int32]: ... @overload - def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... 
+ def __new__( + cls, + dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int64]: ... @overload - def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... + def __new__( + cls, + dtype: _IntPCodes | type[intp | ct.c_ssize_t], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[intp]: ... @overload - def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + def __new__( + cls, + dtype: _LongCodes | type[ct.c_long], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[long]: ... # `floating` string-based representations and ctypes @overload - def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... - @overload - def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... - @overload - def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... - @overload - def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... - @overload - def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... 
+ def __new__( + cls, + dtype: _Float16Codes | _HalfCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float16]: ... @overload - def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + def __new__( + cls, + dtype: _Float32Codes | _SingleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float32]: ... + # float64 codes are covered by overload 1 @overload - def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + def __new__( + cls, + dtype: _LongDoubleCodes | type[ct.c_longdouble], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[longdouble]: ... # `complexfloating` string-based representations @overload - def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... - @overload - def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... - @overload - def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... + def __new__( + cls, + dtype: _Complex64Codes | _CSingleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... @overload - def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... 
+ def __new__( + cls, + dtype: _Complex128Codes | _CDoubleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... @overload - def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... # Miscellaneous string-based representations and ctypes @overload - def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... - @overload - def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... - @overload - def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... - @overload - def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... - @overload - def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... - @overload - def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + def __new__( + cls, + dtype: _TD64Codes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[timedelta64]: ... 
@overload - def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + def __new__( + cls, + dtype: _DT64Codes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[datetime64]: ... # `StringDType` requires special treatment because it has no scalar type @overload @@ -1460,35 +1504,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: _NumberCodes | _NumberCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[number]: ... - @overload - def __new__( - cls, - dtype: _CharacterCodes | type[ct.c_char], + dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[character]: ... - @overload - def __new__( - cls, - dtype: _FlexibleCodes | type[ct.c_char], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[flexible]: ... - @overload - def __new__( - cls, - dtype: _GenericCodes | _GenericCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[generic]: ... # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... @overload @@ -1501,10 +1521,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): ) -> dtype: ... # Catch-all overload for object-likes - # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some - # (static) type `T` s.t. `object_ <: T <: builtins.object` (`<:` denotes - # the subtyping relation, the (gradual) typing analogue of `issubclass()`). 
- # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types + # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave + # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union). + # So the union of a type and `Any` is not the same "union type" that all other + # unions are (by definition). + # https://typing.python.org/en/latest/spec/concepts.html#union-types @overload def __new__( cls, diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 721d2708737f..1794c944b3ae 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -14,12 +14,8 @@ dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] dtype_i8: np.dtype[np.int64] -py_int_co: type[int] -py_float_co: type[float] -py_complex_co: type[complex] py_object: type[_PyObjectLike] py_character: type[str | bytes] -py_flexible: type[str | bytes | memoryview] ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] ct_number: type[ct.c_uint8 | ct.c_float] @@ -48,19 +44,16 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) -assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool]) assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool]) assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) -assert_type(np.dtype(py_character), np.dtype[np.character]) assert_type(np.dtype(memoryview), np.dtype[np.void]) -assert_type(np.dtype(py_flexible), np.dtype[np.flexible]) 
+assert_type(np.dtype(py_character), np.dtype[np.character]) +# object types assert_type(np.dtype(list), np.dtype[np.object_]) assert_type(np.dtype(dt.datetime), np.dtype[np.object_]) assert_type(np.dtype(dt.timedelta), np.dtype[np.object_]) @@ -75,12 +68,9 @@ assert_type(np.dtype("l"), np.dtype[np.long]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) -assert_type(np.dtype(cs_number), np.dtype[np.number]) -assert_type(np.dtype(cs_flex), np.dtype[np.flexible]) -assert_type(np.dtype(cs_generic), np.dtype[np.generic]) # ctypes -assert_type(np.dtype(ct.c_double), np.dtype[np.double]) +assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) assert_type(np.dtype(ct.c_bool), np.dtype[np.bool]) @@ -90,7 +80,7 @@ assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) # Special case for None assert_type(np.dtype(None), np.dtype[np.float64]) -# Dypes of dtypes +# dtypes of dtypes assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) assert_type(np.dtype(dt_inexact), np.dtype[np.inexact]) @@ -99,6 +89,7 @@ assert_type(np.dtype("S8"), np.dtype) # Void assert_type(np.dtype(("U", 10)), np.dtype[np.void]) +assert_type(np.dtype({"formats": (int, "u8"), "names": ("n", "B")}), np.dtype[np.void]) # StringDType assert_type(np.dtype(dt_string), StringDType) From ceb858fe26aa39ddc973539d0271886e336198f1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:44:30 +0200 Subject: [PATCH 103/294] TYP: fix and improve ``numpy.lib._utils_impl`` (#29181) --- numpy/lib/_utils_impl.pyi | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 00ed47c9fb67..6fbd26f88084 100644 --- a/numpy/lib/_utils_impl.pyi +++ 
b/numpy/lib/_utils_impl.pyi @@ -1,10 +1,17 @@ +from typing import LiteralString + from _typeshed import SupportsWrite +from typing_extensions import TypeVar -from numpy._typing import DTypeLike +import numpy as np __all__ = ["get_include", "info", "show_runtime"] -def get_include() -> str: ... +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) + +def get_include() -> LiteralString: ... def show_runtime() -> None: ... -def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ... -def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ... +def info( + object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" +) -> None: ... +def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... From ea91f1847bc82cd7379102571bdb02bb0cd396bc Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:47:23 +0200 Subject: [PATCH 104/294] TYP: ``double = float64`` and ``cdouble = complex128`` (#29155) --- numpy/__init__.pyi | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0801e97b7061..d71a76569ad7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4236,9 +4236,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] -half: TypeAlias = floating[_NBitHalf] -single: TypeAlias = floating[_NBitSingle] -double: TypeAlias = floating[_NBitDouble] +half: TypeAlias = float16 +single: TypeAlias = float32 +double: TypeAlias = float64 longdouble: TypeAlias = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. 
@@ -4336,7 +4336,7 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): self, other: number[_NBit], mod: None = None, / ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... -complex64: TypeAlias = complexfloating[_32Bit, _32Bit] +complex64: TypeAlias = complexfloating[_32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] @overload @@ -4397,9 +4397,9 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... -csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] -cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] -clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] +csingle: TypeAlias = complex64 +cdouble: TypeAlias = complex128 +clongdouble: TypeAlias = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property From 18cbd6d1303b6c445ba451a707017be4723d28d0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 21:25:53 +0200 Subject: [PATCH 105/294] TYP: Fix missing ``_core.numeric`` (re-)exports (#29166) * TYP: Sync `_core/numerictypes.pyi` with NumType's `numpy-stubs` * TYP: Fix missing ``_core.numeric`` (re-)exports * TYP: appease ``ruff`` --- numpy/_core/numeric.pyi | 784 +++++++++++++++++++++++++++-------- numpy/_core/numerictypes.pyi | 87 ++-- 2 files changed, 661 insertions(+), 210 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 919fe1917197..b54fa856b007 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,3 +1,4 @@ +from builtins import bool as py_bool from collections.abc import Callable, Sequence from typing import ( Any, @@ -20,32 +21,12 @@ from numpy import ( True_, _OrderCF, _OrderKACF, - # re-exports bitwise_not, - broadcast, - 
complexfloating, - dtype, - flatiter, - float64, - floating, - from_dlpack, - # other - generic, inf, - int_, - intp, little_endian, - matmul, nan, - ndarray, - nditer, newaxis, - object_, - signedinteger, - timedelta64, ufunc, - unsignedinteger, - vecdot, ) from numpy._typing import ( ArrayLike, @@ -67,38 +48,113 @@ from numpy._typing import ( _SupportsArrayFunc, _SupportsDType, ) +from numpy.lib._array_utils_impl import normalize_axis_tuple as normalize_axis_tuple -from .fromnumeric import all as all -from .fromnumeric import any as any -from .fromnumeric import argpartition as argpartition -from .fromnumeric import matrix_transpose as matrix_transpose -from .fromnumeric import mean as mean +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .multiarray import ALLOW_THREADS as ALLOW_THREADS +from .multiarray import BUFSIZE as BUFSIZE +from .multiarray import CLIP as CLIP +from .multiarray import MAXDIMS as MAXDIMS +from .multiarray import MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS +from .multiarray import MAY_SHARE_EXACT as MAY_SHARE_EXACT +from .multiarray import RAISE as RAISE +from .multiarray import WRAP as WRAP from .multiarray import ( - # other _Array, _ConstructorEmpty, _KwargsEmpty, - # re-exports arange, 
array, asanyarray, asarray, ascontiguousarray, asfortranarray, + broadcast, can_cast, concatenate, copyto, dot, + dtype, empty, empty_like, + flatiter, + from_dlpack, frombuffer, fromfile, fromiter, fromstring, inner, lexsort, + matmul, may_share_memory, min_scalar_type, + ndarray, + nditer, nested_iters, promote_types, putmask, @@ -108,85 +164,473 @@ from .multiarray import ( where, zeros, ) +from .multiarray import normalize_axis_index as normalize_axis_index +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + 
minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, +) __all__ = [ - "newaxis", - "ndarray", - "flatiter", - "nditer", - "nested_iters", - "ufunc", + "False_", + "ScalarType", + "True_", + "absolute", + "add", + "all", + "allclose", + "amax", + "amin", + "any", "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", "array", - "asarray", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", "asanyarray", + "asarray", "ascontiguousarray", "asfortranarray", - "zeros", - "count_nonzero", - "empty", - "broadcast", - "dtype", - "fromstring", - "fromfile", - "frombuffer", - "from_dlpack", - "where", - "argwhere", - "copyto", - "concatenate", - "lexsort", "astype", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_not", + "bitwise_or", + "bitwise_xor", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", "can_cast", - "promote_types", - "min_scalar_type", - "result_type", - "isfortran", - "empty_like", - "zeros_like", - "ones_like", - "correlate", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concatenate", + "conj", + "conjugate", "convolve", - "inner", - "dot", - "outer", - "vdot", - "roll", - "rollaxis", - "moveaxis", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", "cross", - "tensordot", - "little_endian", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + 
"datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", "fromiter", - "array_equal", - "array_equiv", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hypot", + "identity", "indices", - "fromfunction", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", "isscalar", - "binary_repr", - "base_repr", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", "ones", - "identity", - "allclose", + "ones_like", + "outer", + "partition", + "pi", + "positive", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", "putmask", - 
"flatnonzero", - "inf", - "nan", - "False_", - "True_", - "bitwise_not", - "full", - "full_like", - "matmul", - "vecdot", + "rad2deg", + "radians", + "ravel", + "reciprocal", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", "shares_memory", - "may_share_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "where", + "zeros", + "zeros_like", ] _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT = TypeVar("_DTypeT", bound=dtype) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) _AnyShapeT = TypeVar( @@ -201,88 +645,90 @@ _AnyShapeT = TypeVar( _CorrelateMode: TypeAlias = L["valid", "same", "full"] +# keep in sync with `ones_like` @overload def zeros_like( a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... 
@overload def zeros_like( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def zeros_like( - a: Any, + a: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def zeros_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... ones: Final[_ConstructorEmpty] +# keep in sync with `zeros_like` @overload def ones_like( a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def ones_like( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def ones_like( - a: Any, + a: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def ones_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview @@ -389,46 +835,46 @@ def full( @overload def full_like( a: _ArrayT, - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def full_like( a: _ArrayLike[_ScalarT], - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def full_like( - a: Any, - fill_value: Any, + a: object, + fill_value: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def full_like( - a: Any, - fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + fill_value: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # @@ -441,10 +887,10 @@ def count_nonzero( a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] ) -> NDArray[np.intp]: ... @overload -def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... # -def isfortran(a: NDArray[Any] | generic) -> bool: ... +def isfortran(a: NDArray[Any] | generic) -> py_bool: ... def argwhere(a: ArrayLike) -> NDArray[intp]: ... @@ -836,31 +1282,31 @@ def identity( def allclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., -) -> bool: ... + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> py_bool: ... @overload def isclose( a: _ScalarLike_co, b: _ScalarLike_co, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> np.bool: ... @overload def isclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> NDArray[np.bool]: ... -def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ... -def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... 
+def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... @overload def astype( @@ -868,8 +1314,8 @@ def astype( dtype: _DTypeLike[_ScalarT], /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., + copy: py_bool = True, + device: L["cpu"] | None = None, ) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... @overload def astype( @@ -877,6 +1323,6 @@ def astype( dtype: DTypeLike, /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., + copy: py_bool = True, + device: L["cpu"] | None = None, ) -> ndarray[_ShapeT, dtype]: ... diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 753fe34800d5..b649b8f91cd1 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,5 +1,5 @@ -import builtins -from typing import Any, TypedDict, type_check_only +from builtins import bool as py_bool +from typing import Final, TypedDict, type_check_only from typing import Literal as L import numpy as np @@ -13,6 +13,8 @@ from numpy import ( clongdouble, complex64, complex128, + complex192, + complex256, complexfloating, csingle, datetime64, @@ -22,6 +24,8 @@ from numpy import ( float16, float32, float64, + float96, + float128, floating, generic, half, @@ -59,9 +63,8 @@ from numpy import ( void, ) from numpy._typing import DTypeLike -from numpy._typing._extended_precision import complex192, complex256, float96, float128 -from ._type_aliases import sctypeDict # noqa: F401 +from ._type_aliases import sctypeDict as sctypeDict from .multiarray import ( busday_count, busday_offset, @@ -152,41 +155,43 @@ class _TypeCodes(TypedDict): Datetime: L['Mm'] All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] -def isdtype(dtype: dtype | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... +def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> py_bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... 
- -typecodes: _TypeCodes -ScalarType: tuple[ - type[int], - type[float], - type[complex], - type[builtins.bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], - type[datetime64], - type[object_], - type[bytes_], - type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], - type[void], -] +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview], + type[np.bool], + type[csingle], + type[cdouble], + type[clongdouble], + type[half], + type[single], + type[double], + type[longdouble], + type[byte], + type[short], + type[intc], + type[long], + type[longlong], + type[timedelta64], + type[datetime64], + type[object_], + type[bytes_], + type[str_], + type[ubyte], + type[ushort], + type[uintc], + type[ulong], + type[ulonglong], + type[void], + ] +] = ... +typeDict: Final = sctypeDict From 8ce860ffaeecf2a8d3939a6201994a44e58584e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jun 2025 13:56:41 -0600 Subject: [PATCH 106/294] MAINT: Bump pypa/cibuildwheel from 3.0.0b4 to 3.0.0 (#29180) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.0b4 to 3.0.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971...5f22145df44122af0f5a201f93cf0207171beca7) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.0.0 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 86628f6882cd..453a67088adf 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 + - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f74be5f4a455..68352eb1fc7c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -177,7 +177,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 + uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From a3ec9af71069a88d511c71d56993b4cc24f22bdd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 22:29:37 +0200 Subject: [PATCH 107/294] CI: Bump `array-api-tests` to `v2025.05.23` (#29149) * CI: Bump `array-api-tests` to `v2025.05.23` * TST: xfail new array-api test failures * TST: increase ``--max-examples`` from 100 to 500 for ``array-api-tests`` * CI: apply review suggestions Co-authored-by: Evgeni Burovski --------- Co-authored-by: Evgeni Burovski --- .github/workflows/linux.yml | 10 +++++----- tools/ci/array-api-xfails.txt | 15 +++++++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a0e549d86775..3452724841c3 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -156,7 +156,7 @@ jobs: # TODO: gcov env: 
PYTHONOPTIMIZE: 2 - + aarch64_test: needs: [smoke_test] @@ -204,7 +204,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - + - name: Creates new container run: | docker run --name the_container --interactive \ @@ -221,7 +221,7 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build + cd /numpy && spin build '" - name: Meson Log @@ -324,7 +324,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: repository: data-apis/array-api-tests - ref: 'c48410f96fc58e02eea844e6b7f6cc01680f77ce' # Latest commit as of 2025-04-01 + ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 submodules: 'true' path: 'array-api-tests' persist-credentials: false @@ -346,7 +346,7 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - pytest array_api_tests -v -c pytest.ini --ci --max-examples=100 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt + pytest array_api_tests -v -c pytest.ini -n 4 --max-examples=1000 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt custom_checks: needs: [smoke_test] diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt index 98c3895ced06..8370099015c5 100644 --- a/tools/ci/array-api-xfails.txt +++ b/tools/ci/array-api-xfails.txt @@ -1,5 +1,20 @@ # finfo return type misalignment array_api_tests/test_data_type_functions.py::test_finfo[float32] +array_api_tests/test_data_type_functions.py::test_finfo[complex64] + +# finfo: data type not inexact +array_api_tests/test_data_type_functions.py::test_finfo[float64] +array_api_tests/test_data_type_functions.py::test_finfo[complex128] + +# iinfo: Invalid integer data type 'O' 
+array_api_tests/test_data_type_functions.py::test_iinfo[int8] +array_api_tests/test_data_type_functions.py::test_iinfo[uint8] +array_api_tests/test_data_type_functions.py::test_iinfo[int16] +array_api_tests/test_data_type_functions.py::test_iinfo[uint16] +array_api_tests/test_data_type_functions.py::test_iinfo[int32] +array_api_tests/test_data_type_functions.py::test_iinfo[uint32] +array_api_tests/test_data_type_functions.py::test_iinfo[int64] +array_api_tests/test_data_type_functions.py::test_iinfo[uint64] # 'shape' arg is present. 'newshape' is retained for backward compat. array_api_tests/test_signatures.py::test_func_signature[reshape] From 3d1cdb476d12402f28548a32db3f35c4212bf3e2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 22:53:25 +0200 Subject: [PATCH 108/294] TYP: fix ``ravel_multi_index`` false rejections (#29184) --- numpy/_core/multiarray.pyi | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index e4f869b9beae..13a3f0077ce0 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -499,34 +499,28 @@ def array( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... +# @overload -def unravel_index( # type: ignore[misc] - indices: _IntLike_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[intp, ...]: ... -@overload -def unravel_index( - indices: _ArrayLikeInt_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[NDArray[intp], ...]: ... - -@overload -def ravel_multi_index( # type: ignore[misc] - multi_index: Sequence[_IntLike_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_IntLike_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> intp: ... 
@overload def ravel_multi_index( - multi_index: Sequence[_ArrayLikeInt_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., + multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> NDArray[intp]: ... +# +@overload +def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ... +@overload +def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... + # NOTE: Allow any sequence of array-like objects @overload def concatenate( # type: ignore[misc] From 2c190768902c4dcf6454bc41df1b0e378099d42c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 23:07:11 +0200 Subject: [PATCH 109/294] STY: ruff/isort config tweaks (#29183) * DEV: import-related ruff config tweaks * STY: run ``ruff check --fix`` to fix the new ``I001`` errors --- numpy/_core/_dtype.pyi | 1 - numpy/_core/_internal.pyi | 1 - numpy/_core/_ufunc_config.pyi | 3 +-- numpy/_core/arrayprint.pyi | 1 - numpy/_core/defchararray.pyi | 1 - numpy/_core/fromnumeric.pyi | 3 +-- numpy/_core/function_base.pyi | 3 +-- numpy/_core/multiarray.pyi | 3 +-- numpy/_core/records.pyi | 3 +-- numpy/_core/tests/test_api.py | 2 +- numpy/_core/tests/test_argparse.py | 4 ++-- numpy/_core/tests/test_array_coercion.py | 4 ++-- numpy/_core/tests/test_arraymethod.py | 2 +- numpy/_core/tests/test_casting_unittests.py | 2 +- numpy/_core/tests/test_conversion_utils.py | 2 +- numpy/_core/tests/test_cpu_dispatcher.py | 3 +-- numpy/_core/tests/test_cpu_features.py | 1 + numpy/_core/tests/test_custom_dtypes.py | 4 ++-- numpy/_core/tests/test_deprecations.py | 4 ++-- numpy/_core/tests/test_dtype.py | 4 ++-- numpy/_core/tests/test_extint128.py | 2 +- numpy/_core/tests/test_hashtable.py | 1 + numpy/_core/tests/test_indexing.py | 2 +- 
numpy/_core/tests/test_mem_overlap.py | 2 +- numpy/_core/tests/test_multiarray.py | 4 ++-- numpy/_core/tests/test_nditer.py | 2 +- numpy/_core/tests/test_numeric.py | 2 +- numpy/_core/tests/test_scalarbuffer.py | 4 ++-- numpy/_core/tests/test_scalarmath.py | 2 +- numpy/_core/tests/test_simd.py | 2 +- numpy/_core/tests/test_ufunc.py | 6 +++--- numpy/_core/tests/test_umath_accuracy.py | 2 +- numpy/_core/tests/test_umath_complex.py | 5 +++-- numpy/_typing/_nbit_base.pyi | 1 - numpy/_utils/__init__.pyi | 3 +-- numpy/_utils/_inspect.pyi | 3 +-- numpy/_utils/_pep440.pyi | 1 - numpy/ctypeslib/_ctypeslib.pyi | 3 +-- numpy/dtypes.pyi | 1 - numpy/f2py/_backends/_meson.pyi | 1 - numpy/f2py/_src_pyf.pyi | 3 +-- numpy/f2py/auxfuncs.pyi | 3 +-- numpy/f2py/crackfortran.pyi | 3 +-- numpy/f2py/f2py2e.pyi | 1 - numpy/f2py/rules.pyi | 1 - numpy/f2py/symbolic.pyi | 1 - numpy/fft/helper.pyi | 1 - numpy/lib/_arraysetops_impl.pyi | 1 - numpy/lib/_arrayterator_impl.pyi | 1 - numpy/lib/_datasource.pyi | 3 +-- numpy/lib/_format_impl.pyi | 3 +-- numpy/lib/_function_base_impl.pyi | 3 +-- numpy/lib/_index_tricks_impl.pyi | 3 +-- numpy/lib/_npyio_impl.pyi | 15 +++++++-------- numpy/lib/_shape_base_impl.pyi | 1 - numpy/lib/_type_check_impl.pyi | 3 +-- numpy/lib/_user_array_impl.pyi | 3 +-- numpy/lib/_utils_impl.pyi | 3 +-- numpy/lib/recfunctions.pyi | 3 +-- numpy/ma/core.pyi | 3 +-- numpy/polynomial/_polybase.pyi | 1 - numpy/random/bit_generator.pyi | 3 +-- numpy/testing/_private/utils.pyi | 5 ++--- numpy/testing/overrides.pyi | 1 - numpy/testing/print_coercion_tables.pyi | 1 - ruff.toml | 15 +++++++++++++++ 66 files changed, 80 insertions(+), 104 deletions(-) diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index 6cdd77b22e07..adb38583783f 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,6 +1,5 @@ from typing import Final, TypeAlias, TypedDict, overload, type_check_only from typing import Literal as L - from typing_extensions import ReadOnly, TypeVar import 
numpy as np diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 3038297b6328..04c26ca284db 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -2,7 +2,6 @@ import ctypes as ct import re from collections.abc import Callable, Iterable from typing import Any, Final, Generic, Self, overload - from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index 1a6613154072..86df9827d652 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,8 +1,7 @@ +from _typeshed import SupportsWrite from collections.abc import Callable from typing import Any, Literal, TypeAlias, TypedDict, type_check_only -from _typeshed import SupportsWrite - from numpy import errstate as errstate _ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index fec03a6f265c..967cc09e6a25 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -13,7 +13,6 @@ from typing import ( overload, type_check_only, ) - from typing_extensions import deprecated import numpy as np diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 43005745bfab..26a5af432824 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,6 +1,5 @@ from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index f0f83093c3b1..2aedc727e6dc 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import ( Any, @@ -11,8 +12,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import Incomplete 
from typing_extensions import deprecated import numpy as np diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 44d1311f5b44..600265b1fd0a 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from typing import Literal as L from typing import SupportsIndex, TypeAlias, TypeVar, overload -from _typeshed import Incomplete - import numpy as np from numpy._typing import ( DTypeLike, diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 13a3f0077ce0..91ff666688bb 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,5 +1,6 @@ # TODO: Sort out any and all missing functions in this namespace import datetime as dt +from _typeshed import StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -18,8 +19,6 @@ from typing import ( from typing import ( Literal as L, ) - -from _typeshed import StrOrBytesPath, SupportsLenAndGetItem from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 93177b2d3f75..ead165918478 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,5 +1,6 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false +from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from typing import ( Any, @@ -11,8 +12,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import StrOrBytesPath from typing_extensions import TypeVar import numpy as np diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 25990536809b..d427ac0399a2 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -1,10 +1,10 @@ import sys import pytest -from numpy._core._rational_tests import rational import numpy as np import numpy._core.umath as ncu +from numpy._core._rational_tests import 
rational from numpy.testing import ( HAS_REFCOUNT, assert_, diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index 7f949c1059eb..0c49ec00277e 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -14,14 +14,14 @@ def func(arg1, /, arg2, *, arg3): import threading import pytest + +import numpy as np from numpy._core._multiarray_tests import ( argparse_example_function as func, ) from numpy._core._multiarray_tests import ( threaded_argparse_example_function as thread_func, ) - -import numpy as np from numpy.testing import IS_WASM diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 883aee63ac3a..a3939daa8904 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -6,12 +6,12 @@ from itertools import permutations, product -import numpy._core._multiarray_umath as ncu import pytest -from numpy._core._rational_tests import rational from pytest import param import numpy as np +import numpy._core._multiarray_umath as ncu +from numpy._core._rational_tests import rational from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py index d8baef7e7fbf..5b3d51585718 100644 --- a/numpy/_core/tests/test_arraymethod.py +++ b/numpy/_core/tests/test_arraymethod.py @@ -7,9 +7,9 @@ from typing import Any import pytest -from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl class TestResolveDescriptors: diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index f8441ea9d0d7..91ecc0dc75b0 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -12,9 +12,9 @@ import textwrap import pytest -from 
numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl from numpy.lib.stride_tricks import as_strided from numpy.testing import assert_array_equal diff --git a/numpy/_core/tests/test_conversion_utils.py b/numpy/_core/tests/test_conversion_utils.py index 03ba33957821..d63ca9e58df5 100644 --- a/numpy/_core/tests/test_conversion_utils.py +++ b/numpy/_core/tests/test_conversion_utils.py @@ -3,9 +3,9 @@ """ import re -import numpy._core._multiarray_tests as mt import pytest +import numpy._core._multiarray_tests as mt from numpy._core.multiarray import CLIP, RAISE, WRAP from numpy.testing import assert_raises diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index 0a47685d0397..fc9d5e3147e0 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ -1,10 +1,9 @@ +from numpy._core import _umath_tests from numpy._core._multiarray_umath import ( __cpu_baseline__, __cpu_dispatch__, __cpu_features__, ) - -from numpy._core import _umath_tests from numpy.testing import assert_equal diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index d1e3dc610d49..ecc806e9c0e5 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -6,6 +6,7 @@ import sys import pytest + from numpy._core._multiarray_umath import ( __cpu_baseline__, __cpu_dispatch__, diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 66e6de35b427..3336286d8c98 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -1,12 +1,12 @@ from tempfile import NamedTemporaryFile import pytest + +import numpy as np from numpy._core._multiarray_umath import ( _discover_array_parameters as discover_array_params, ) from numpy._core._multiarray_umath import 
_get_sfloat_dtype - -import numpy as np from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index cb552357fc96..c4acbf9d2d69 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -6,11 +6,11 @@ import contextlib import warnings -import numpy._core._struct_ufunc_tests as struct_ufunc import pytest -from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 import numpy as np +import numpy._core._struct_ufunc_tests as struct_ufunc +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 from numpy.testing import assert_raises, temppath diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 684672a9b71f..d9bd17c48434 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -11,11 +11,11 @@ import hypothesis import pytest from hypothesis.extra import numpy as hynp -from numpy._core._multiarray_tests import create_custom_field_dtype -from numpy._core._rational_tests import rational import numpy as np import numpy.dtypes +from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, IS_PYSTON, diff --git a/numpy/_core/tests/test_extint128.py b/numpy/_core/tests/test_extint128.py index 1a05151ac6be..6e4d74b81d39 100644 --- a/numpy/_core/tests/test_extint128.py +++ b/numpy/_core/tests/test_extint128.py @@ -2,10 +2,10 @@ import itertools import operator -import numpy._core._multiarray_tests as mt import pytest import numpy as np +import numpy._core._multiarray_tests as mt from numpy.testing import assert_equal, assert_raises INT64_MAX = np.iinfo(np.int64).max diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index 74be5219a287..25a7158aaf6f 100644 --- 
a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,6 +1,7 @@ import random import pytest + from numpy._core._multiarray_tests import identityhash_tester diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 757b8d72782f..81ba85ea4648 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -5,9 +5,9 @@ from itertools import product import pytest -from numpy._core._multiarray_tests import array_indexing import numpy as np +from numpy._core._multiarray_tests import array_indexing from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( HAS_REFCOUNT, diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index 78b943854679..240ea62850ee 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -1,10 +1,10 @@ import itertools import pytest -from numpy._core._multiarray_tests import internal_overlap, solve_diophantine import numpy as np from numpy._core import _umath_tests +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine from numpy.lib.stride_tricks import as_strided from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 7603449ba28e..146431c7a7c0 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -21,11 +21,11 @@ from datetime import datetime, timedelta from decimal import Decimal -import numpy._core._multiarray_tests as _multiarray_tests import pytest -from numpy._core._rational_tests import rational import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests +from numpy._core._rational_tests import rational from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale 
from numpy.exceptions import AxisError, ComplexWarning diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index ec28e48c5046..a29a49bfb71a 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -2,10 +2,10 @@ import sys import textwrap -import numpy._core._multiarray_tests as _multiarray_tests import pytest import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests import numpy._core.umath as ncu from numpy import all, arange, array, nditer from numpy.testing import ( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 8e786bf13d9e..8a72e4bfa65d 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -9,11 +9,11 @@ from hypothesis import given from hypothesis import strategies as st from hypothesis.extra import numpy as hynp -from numpy._core._rational_tests import rational import numpy as np from numpy import ma from numpy._core import sctypes +from numpy._core._rational_tests import rational from numpy._core.numerictypes import obj2sctype from numpy.exceptions import AxisError from numpy.random import rand, randint, randn diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index c957aec4f9b2..4d2744b85e53 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -2,10 +2,10 @@ Test scalar buffer interface adheres to PEP 3118 """ import pytest -from numpy._core._multiarray_tests import get_buffer_info -from numpy._core._rational_tests import rational import numpy as np +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational from numpy.testing import assert_, assert_equal, assert_raises # PEP3118 format strings for native (standard alignment and byteorder) types diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index fc37897bb7f7..746b410f79d2 
100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -9,9 +9,9 @@ from hypothesis import given, settings from hypothesis.extra import numpy as hynp from hypothesis.strategies import sampled_from -from numpy._core._rational_tests import rational import numpy as np +from numpy._core._rational_tests import rational from numpy._utils import _pep440 from numpy.exceptions import ComplexWarning from numpy.testing import ( diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index 697d89bcc26c..acea4315e679 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -6,8 +6,8 @@ import re import pytest -from numpy._core._multiarray_umath import __cpu_baseline__ +from numpy._core._multiarray_umath import __cpu_baseline__ from numpy._core._simd import clear_floatstatus, get_floatstatus, targets diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index f2b3f5a35a37..21ebc02c2625 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -4,13 +4,13 @@ import sys import warnings -import numpy._core._operand_flag_tests as opflag_tests -import numpy._core._rational_tests as _rational_tests -import numpy._core._umath_tests as umt import pytest from pytest import param import numpy as np +import numpy._core._operand_flag_tests as opflag_tests +import numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt import numpy._core.umath as ncu import numpy.linalg._umath_linalg as uml from numpy.exceptions import AxisError diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 5707e9279d5b..da9419d63a8a 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -4,9 +4,9 @@ from os import path import pytest -from numpy._core._multiarray_umath import __cpu_features__ import numpy as np +from numpy._core._multiarray_umath 
import __cpu_features__ from numpy.testing import assert_array_max_ulp from numpy.testing._private.utils import _glibc_older_than diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index a97af475def4..8f6f5c682a91 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -1,11 +1,12 @@ import platform import sys -# import the c-extension module directly since _arg is not exported via umath -import numpy._core._multiarray_umath as ncu import pytest import numpy as np + +# import the c-extension module directly since _arg is not exported via umath +import numpy._core._multiarray_umath as ncu from numpy.testing import ( assert_almost_equal, assert_array_equal, diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index ccf8f5ceac45..d88c9f4d9fd9 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -3,7 +3,6 @@ # mypy: disable-error-code=misc from typing import final - from typing_extensions import deprecated # Deprecated in NumPy 2.3, 2025-05-01 diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index f3472df9a554..2ed4e88b3e32 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -1,8 +1,7 @@ +from _typeshed import IdentityFunction from collections.abc import Callable, Iterable from typing import Protocol, TypeVar, overload, type_check_only -from _typeshed import IdentityFunction - from ._convertions import asbytes as asbytes from ._convertions import asunicode as asunicode diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi index d53c3c40fcf5..40546d2f4497 100644 --- a/numpy/_utils/_inspect.pyi +++ b/numpy/_utils/_inspect.pyi @@ -1,8 +1,7 @@ import types +from _typeshed import SupportsLenAndGetItem from collections.abc import Callable, Mapping from typing import Any, Final, TypeAlias, TypeVar, overload - -from _typeshed import SupportsLenAndGetItem from typing_extensions import TypeIs 
__all__ = ["formatargspec", "getargspec"] diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 29dd4c912aa9..2c338d4e5b14 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -13,7 +13,6 @@ from typing import ( from typing import ( Literal as L, ) - from typing_extensions import TypeIs __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index e26d6052eaae..aecb3899bdf5 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -1,6 +1,7 @@ # NOTE: Numpy's mypy plugin is used for importing the correct # platform-specific `ctypes._SimpleCData[int]` sub-type import ctypes +from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from ctypes import c_int64 as _c_intp from typing import ( @@ -13,8 +14,6 @@ from typing import ( ) from typing import Literal as L -from _typeshed import StrOrBytesPath - import numpy as np from numpy import ( byte, diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 007dc643c0e3..f76b08fc28dc 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -12,7 +12,6 @@ from typing import ( type_check_only, ) from typing import Literal as L - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index b9f959537214..67baf9b76845 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -2,7 +2,6 @@ from collections.abc import Callable from pathlib import Path from typing import Final from typing import Literal as L - from typing_extensions import override from ._backend import Backend diff --git a/numpy/f2py/_src_pyf.pyi b/numpy/f2py/_src_pyf.pyi index f5aecbf1decd..50ddd07bf638 100644 --- a/numpy/f2py/_src_pyf.pyi +++ b/numpy/f2py/_src_pyf.pyi @@ -1,9 +1,8 @@ import re +from _typeshed import StrOrBytesPath from collections.abc import Mapping 
from typing import Final -from _typeshed import StrOrBytesPath - routine_start_re: Final[re.Pattern[str]] = ... routine_end_re: Final[re.Pattern[str]] = ... function_start_re: Final[re.Pattern[str]] = ... diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index f2ff09faf33b..dfbae5c7d94d 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,10 +1,9 @@ +from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show from typing import Any, Final, Never, TypeAlias, TypeVar, overload from typing import Literal as L -from _typeshed import FileDescriptorOrPath - from .cfuncs import errmess __all__ = [ diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index 6b08f8784f01..c5f4fd7585ba 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,10 +1,9 @@ import re +from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload from typing import Literal as L -from _typeshed import StrOrBytesPath, StrPath - from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index dd1d0c39e8a5..03aeffc5dcdd 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -3,7 +3,6 @@ import pprint from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from types import ModuleType from typing import Any, Final, NotRequired, TypedDict, type_check_only - from typing_extensions import TypeVar, override from .__version__ import version diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index aa91e942698a..58614060ba87 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable, Iterable, Mapping from typing import Any, Final, TypeAlias from typing import Literal as 
L - from typing_extensions import TypeVar from .__version__ import version diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index 74e7a48ab327..e7b14f751dc3 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -2,7 +2,6 @@ from collections.abc import Callable, Mapping from enum import Enum from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar __all__ = ["Expr"] diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index 887cbe7e27c9..7cf391a12e1d 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,6 +1,5 @@ from typing import Any from typing import Literal as L - from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index a7ad5b9d91e7..4279b809f78e 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,6 +1,5 @@ from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index e1a9e056a6e1..5fd589a3ac36 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -3,7 +3,6 @@ from collections.abc import Generator from types import EllipsisType from typing import Any, Final, TypeAlias, overload - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index 9f91fdf893a0..ad52b7f67af0 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -1,8 +1,7 @@ +from _typeshed import OpenBinaryMode, OpenTextMode from pathlib import Path from typing import IO, Any, TypeAlias -from _typeshed import OpenBinaryMode, OpenTextMode - _Mode: TypeAlias = OpenBinaryMode | OpenTextMode ### diff --git 
a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index 870c2d761bb0..b45df02796d7 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,7 +1,6 @@ import os -from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard - from _typeshed import SupportsRead, SupportsWrite +from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard import numpy as np import numpy.typing as npt diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 090fb233dde1..cb6e18b53fa4 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -13,8 +14,6 @@ from typing import ( type_check_only, ) from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeIs, deprecated import numpy as np diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 7ac2b3a093e0..c4509d9aa3ad 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from collections.abc import Sequence from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 40369c55f63d..94f014ccd52d 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -1,5 +1,12 @@ import types import zipfile +from _typeshed import ( + StrOrBytesPath, + StrPath, + SupportsKeysAndGetItem, + SupportsRead, + SupportsWrite, +) from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence from re import Pattern from typing import ( @@ -14,14 +21,6 @@ from typing import ( type_check_only, ) 
from typing import Literal as L - -from _typeshed import ( - StrOrBytesPath, - StrPath, - SupportsKeysAndGetItem, - SupportsRead, - SupportsWrite, -) from typing_extensions import TypeVar, deprecated, override import numpy as np diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index a50d372bb97e..0206d95109fa 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -9,7 +9,6 @@ from typing import ( overload, type_check_only, ) - from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 944015e423bb..b9ab2a02f5f5 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from collections.abc import Container, Iterable from typing import Any, Protocol, TypeAlias, overload, type_check_only from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 13c0a0163421..0aeec42129af 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,7 +1,6 @@ +from _typeshed import Incomplete from types import EllipsisType from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeVar, override import numpy as np diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 6fbd26f88084..7a34f273c423 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,6 +1,5 @@ -from typing import LiteralString - from _typeshed import SupportsWrite +from typing import LiteralString from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index 073642918af3..a0b49ba1df00 100644 --- a/numpy/lib/recfunctions.pyi +++ 
b/numpy/lib/recfunctions.pyi @@ -1,7 +1,6 @@ +from _typeshed import Incomplete from collections.abc import Callable, Iterable, Mapping, Sequence from typing import Any, Literal, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeVar import numpy as np diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index da4ad3b333db..de6db7873faa 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,10 +1,9 @@ # pyright: reportIncompatibleMethodOverride=false # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeIs, TypeVar import numpy as np diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 6d71a8cb8d2c..30c906fa3b4b 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -13,7 +13,6 @@ from typing import ( TypeAlias, overload, ) - from typing_extensions import TypeIs, TypeVar import numpy as np diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 6ce4f4b9d6a1..ee4499dee1f3 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,4 +1,5 @@ import abc +from _typeshed import Incomplete from collections.abc import Callable, Mapping, Sequence from threading import Lock from typing import ( @@ -12,8 +13,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import Incomplete from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 4e3b60a0ef70..59a7539b69f1 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -3,6 +3,7 @@ import sys import types import unittest import warnings +from _typeshed import ConvertibleToFloat, GenericPath, 
StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Sequence from contextlib import _GeneratorContextManager from pathlib import Path @@ -23,10 +24,8 @@ from typing import ( type_check_only, ) from typing import Literal as L -from unittest.case import SkipTest - -from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from typing_extensions import TypeVar +from unittest.case import SkipTest import numpy as np from numpy._typing import ( diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi index 3fefc3f350da..916154c155b1 100644 --- a/numpy/testing/overrides.pyi +++ b/numpy/testing/overrides.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Hashable from typing import Any - from typing_extensions import TypeIs import numpy as np diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi index c859305f2350..f463a18c05e4 100644 --- a/numpy/testing/print_coercion_tables.pyi +++ b/numpy/testing/print_coercion_tables.pyi @@ -1,6 +1,5 @@ from collections.abc import Iterable from typing import ClassVar, Generic, Self - from typing_extensions import TypeVar import numpy as np diff --git a/ruff.toml b/ruff.toml index deb52e834df9..7454c6c05e5b 100644 --- a/ruff.toml +++ b/ruff.toml @@ -16,6 +16,9 @@ extend-exclude = [ line-length = 88 +[format] +line-ending = "lf" + [lint] preview = true extend-select = [ @@ -115,3 +118,15 @@ ignore = [ "numpy/ma/core.pyi" = ["F403", "F405"] "numpy/matlib.py" = ["F405"] "numpy/matlib.pyi" = ["F811"] + +[lint.flake8-builtins] +builtins-allowed-modules = ["random", "typing"] + +[lint.flake8-import-conventions.extend-aliases] +"numpy" = "np" +"numpy.typing" = "npt" + +[lint.isort] +# these are treated as stdlib within .pyi stubs +extra-standard-library = ["_typeshed", "typing_extensions"] +known-first-party = ["numpy"] From 1ea01c57b9dfe7948eb05c13d9a379459e0896b4 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 12 Jun 2025 
02:25:09 +0200 Subject: [PATCH 110/294] MAINT: bump ``ruff`` to ``0.11.13`` (#29186) --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 5f1ee5e81a5f..770a83218133 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - ruff=0.11.9 + - ruff=0.11.13 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 0716b235ec9c..45319571b561 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,3 +1,3 @@ # keep in sync with `environment.yml` -ruff==0.11.9 +ruff==0.11.13 GitPython>=3.1.30 From 303095ed53a46110b14ccb36e0f683cb961a1746 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 12 Jun 2025 09:21:18 +0300 Subject: [PATCH 111/294] BUG: fix matmul with transposed out arg (#29179) * BUG: fix matmul with transposed out arg * DOC: add release note * fixes from review --- doc/release/upcoming_changes/29179.change.rst | 4 ++++ doc/source/release/2.3.0-notes.rst | 6 ++++++ numpy/_core/src/umath/matmul.c.src | 2 +- numpy/_core/tests/test_multiarray.py | 6 ++++++ 4 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 doc/release/upcoming_changes/29179.change.rst diff --git a/doc/release/upcoming_changes/29179.change.rst b/doc/release/upcoming_changes/29179.change.rst new file mode 100644 index 000000000000..12eb6804d3dd --- /dev/null +++ b/doc/release/upcoming_changes/29179.change.rst @@ -0,0 +1,4 @@ +Fix bug in ``matmul`` for non-contiguous out kwarg parameter +------------------------------------------------------------ +In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause +memory corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. 
diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst index faad9ffcc8eb..4c3c923b3b5e 100644 --- a/doc/source/release/2.3.0-notes.rst +++ b/doc/source/release/2.3.0-notes.rst @@ -414,6 +414,12 @@ the best performance. (`gh-28769 `__) +Performance improvements for ``np.matmul`` +------------------------------------------ +Enable using BLAS for ``matmul`` even when operands are non-contiguous by copying +if needed. + +(`gh-23752 `__) Changes ======= diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index d9be7b1d6826..02c4fde56bf2 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -596,7 +596,7 @@ NPY_NO_EXPORT void * Use transpose equivalence: * matmul(a, b, o) == matmul(b.T, a.T, o.T) */ - if (o_f_blasable) { + if (o_transpose) { @TYPE@_matmul_matrixmatrix( ip2_, is2_p_, is2_n_, ip1_, is1_n_, is1_m_, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 146431c7a7c0..11bb80ac1985 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7317,6 +7317,12 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) + # matrix matrix, issue 29164 + if [len(args[0].shape), len(args[1].shape)] == [2, 2]: + out_f = np.zeros((r2.shape[0] * 2, r2.shape[1] * 2), order='F') + r4 = np.matmul(*args, out=out_f[::2, ::2]) + assert_equal(r2, r4) + def test_matmul_object(self): import fractions From a811c05394a2a052a5b95b130f415bea4ccd542e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 12 Jun 2025 14:36:00 +0200 Subject: [PATCH 112/294] TYP: add ``__all__`` in ``numpy._core.__init__`` (#29187) Ported from numpy/numtype#156 --- numpy/_core/__init__.pyi | 668 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 666 insertions(+), 2 deletions(-) diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index 
40d9c411b97c..a8884917f34a 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -1,2 +1,666 @@ -# NOTE: The `np._core` namespace is deliberately kept empty due to it -# being private +# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .einsumfunc import einsum, einsum_path +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .fromnumeric import transpose as permute_dims +from .function_base import geomspace, linspace, logspace +from .getlimits import finfo, iinfo +from .memmap import memmap +from .numeric import ( + False_, + True_, + allclose, + arange, + argwhere, + array, + array_equal, + array_equiv, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + astype, + base_repr, + binary_repr, + bitwise_not, + broadcast, + can_cast, + concatenate, + convolve, + copyto, + correlate, + count_nonzero, + cross, + dot, + dtype, + empty, + empty_like, + flatiter, + flatnonzero, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + fromstring, + full, + full_like, + identity, + indices, + inf, + inner, + isclose, + isfortran, + isscalar, + lexsort, + little_endian, + matmul, + may_share_memory, + min_scalar_type, + 
moveaxis, + nan, + ndarray, + nditer, + nested_iters, + newaxis, + ones, + ones_like, + outer, + promote_types, + putmask, + result_type, + roll, + rollaxis, + shares_memory, + tensordot, + ufunc, + vdot, + vecdot, + where, + zeros, + zeros_like, +) +from .numeric import concatenate as concat +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + sctypeDict, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .records import recarray, record +from .shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + unstack, + vstack, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + 
minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecmat, +) +from .umath import absolute as abs +from .umath import arccos as acos +from .umath import arccosh as acosh +from .umath import arcsin as asin +from .umath import arcsinh as asinh +from .umath import arctan as atan +from .umath import arctan2 as atan2 +from .umath import arctanh as atanh +from .umath import invert as bitwise_invert +from .umath import left_shift as bitwise_left_shift +from .umath import power as pow +from .umath import right_shift as bitwise_right_shift + +__all__ = [ + "False_", + "ScalarType", + "True_", + "abs", + "absolute", + "acos", + "acosh", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "asin", + "asinh", + "astype", + "atan", + "atan2", + "atanh", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_invert", + "bitwise_left_shift", + "bitwise_not", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "block", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concat", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", 
+ "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "einsum", + "einsum_path", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "finfo", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "geomspace", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hstack", + "hypot", + "identity", + "iinfo", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "linspace", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logspace", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "memmap", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + 
"object_", + "ones", + "ones_like", + "outer", + "partition", + "permute_dims", + "pi", + "positive", + "pow", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "recarray", + "reciprocal", + "record", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "sctypeDict", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "stack", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "unstack", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "vstack", + "where", + "zeros", + "zeros_like", +] From b88c248f92442a0df7ecd24ad887053501af979c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 17:36:23 +0000 Subject: [PATCH 113/294] MAINT: Bump github/codeql-action from 3.28.19 to 3.29.0 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.19 to 3.29.0. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fca7ace96b7d713c7035871441bd52efbe39e27e...ce28f5bb42b7a9f2c824e633a3f6ee835bab6858) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index e0318652d2af..0342fd92c924 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9e21251f87c8..e64789006e2c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v2.1.27 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v2.1.27 with: sarif_file: results.sarif From 8c2dec1f824ecd366f88e423ef86a6507b1a329f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Rozet?= Date: Fri, 13 Jun 2025 11:44:29 +0200 Subject: [PATCH 114/294] BUG: Revert `np.vectorize` casting to legacy behavior (#29196) Revert use of no `dtype=object` to ensure correct cast behavior when the output dtype is discovered. Co-authored-by: Sebastian Berg --- numpy/lib/_function_base_impl.py | 4 +++- numpy/lib/tests/test_function_base.py | 9 +++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f217af64fb4b..096043e6316f 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2573,6 +2573,7 @@ def _get_ufunc_and_otypes(self, func, args): # the subsequent call when the ufunc is evaluated. 
# Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) + args = [asarray(a) for a in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') @@ -2618,8 +2619,9 @@ def _vectorize_call(self, func, args): elif not args: res = func() else: - args = [asanyarray(a, dtype=object) for a in args] ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + # gh-29196: `dtype=object` should eventually be removed + args = [asanyarray(a, dtype=object) for a in args] outputs = ufunc(*args, out=...) if ufunc.nout == 1: diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 50c61e6e04fa..f2dba193c849 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1732,6 +1732,15 @@ def test_string_ticket_1892(self): s = '0123456789' * 10 assert_equal(s, f(s)) + def test_dtype_promotion_gh_29189(self): + # dtype should not be silently promoted (int32 -> int64) + dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] + + for dtype in dtypes: + x = np.asarray([1, 2, 3], dtype=dtype) + y = np.vectorize(lambda x: x + x)(x) + assert x.dtype == y.dtype + def test_cache(self): # Ensure that vectorized func called exactly once per argument. _calls = [0] From 79deb8f21ebe83c0d5e0f4bf2fb53887a31ad5cf Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Fri, 13 Jun 2025 08:07:53 -0600 Subject: [PATCH 115/294] DOC: Suppress distutils doc build warnings for python 3.12+ (#29160) Enables an error free build of the docs for python 3.12+. Excludes related files, supresses all warnings for excluded files, and ignores case-by-case individual references. Closes #29131. 
[skip azp][skip cirrus][skip actions] --- doc/source/conf.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index e3146bf768c9..eba0bd014fb0 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -150,8 +150,26 @@ class PyTypeObject(ctypes.Structure): exclude_dirs = [] exclude_patterns = [] +suppress_warnings = [] +nitpick_ignore = [] + if sys.version_info[:2] >= (3, 12): - exclude_patterns += ["reference/distutils.rst"] + exclude_patterns += [ + "reference/distutils.rst", + "reference/distutils/misc_util.rst", + ] + suppress_warnings += [ + 'toc.excluded', # Suppress warnings about excluded toctree entries + ] + nitpicky = True + nitpick_ignore += [ + ('ref', 'numpy-distutils-refguide'), + # The first ignore is not catpured without nitpicky = True. + # These three ignores are required once nitpicky = True is set. + ('py:mod', 'numpy.distutils'), + ('py:class', 'Extension'), + ('py:class', 'numpy.distutils.misc_util.Configuration'), + ] # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = False @@ -612,7 +630,7 @@ class NumPyLexer(CLexer): breathe_default_members = ("members", "undoc-members", "protected-members") # See https://github.com/breathe-doc/breathe/issues/696 -nitpick_ignore = [ +nitpick_ignore += [ ('c:identifier', 'FILE'), ('c:identifier', 'size_t'), ('c:identifier', 'PyHeapTypeObject'), From 0b74c624e2791d5ee9ae3bec59e5b101d7e2f864 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 13 Jun 2025 21:04:39 +0200 Subject: [PATCH 116/294] TYP: fix ``ndarray.__array__`` annotation for ``copy`` (#29204) --- numpy/__init__.pyi | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d71a76569ad7..272e52f88e83 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2069,13 +2069,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__( - self, dtype: None = ..., /, *, copy: bool | None = ... - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__( - self, dtype: _DTypeT, /, *, copy: bool | None = ... - ) -> ndarray[_ShapeT_co, _DTypeT]: ... + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... 
def __array_ufunc__( self, From 470943bf78824b12081df5b8c097c3511d42175f Mon Sep 17 00:00:00 2001 From: Michael Date: Fri, 13 Jun 2025 21:11:29 +0200 Subject: [PATCH 117/294] TST: additional tests for matmul with non-contiguous input and output (#29197) --- numpy/_core/tests/test_multiarray.py | 32 +++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 11bb80ac1985..f66109a3d8b5 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7317,11 +7317,33 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) - # matrix matrix, issue 29164 - if [len(args[0].shape), len(args[1].shape)] == [2, 2]: - out_f = np.zeros((r2.shape[0] * 2, r2.shape[1] * 2), order='F') - r4 = np.matmul(*args, out=out_f[::2, ::2]) - assert_equal(r2, r4) + # issue 29164 with extra checks + @pytest.mark.parametrize('dtype', ( + np.float32, np.float64, np.complex64, np.complex128 + )) + def test_dot_equivalent_matrix_matrix_blastypes(self, dtype): + modes = list(itertools.product(['C', 'F'], [True, False])) + + def apply_mode(m, mode): + order, is_contiguous = mode + if is_contiguous: + return m.copy() if order == 'C' else m.T.copy().T + + retval = np.zeros( + (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order + )[::2, ::2] + retval[...] 
= m + return retval + + is_complex = np.issubdtype(dtype, np.complexfloating) + m1 = self.m1.astype(dtype) + (1j if is_complex else 0) + m2 = self.m2.astype(dtype) + (1j if is_complex else 0) + dot_res = np.dot(m1, m2) + mo = np.zeros_like(dot_res) + + for mode in itertools.product(*[modes]*3): + m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] + assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) def test_matmul_object(self): import fractions From 2f4ace73ee323932f2e5b9e32da696b47e7bda51 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sun, 15 Jun 2025 15:21:03 +0300 Subject: [PATCH 118/294] BUG: fix linting (#29210) --- numpy/_core/tests/test_multiarray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index f66109a3d8b5..b164f1dada3b 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7333,7 +7333,7 @@ def apply_mode(m, mode): (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order )[::2, ::2] retval[...] 
= m - return retval + return retval is_complex = np.issubdtype(dtype, np.complexfloating) m1 = self.m1.astype(dtype) + (1j if is_complex else 0) @@ -7341,7 +7341,7 @@ def apply_mode(m, mode): dot_res = np.dot(m1, m2) mo = np.zeros_like(dot_res) - for mode in itertools.product(*[modes]*3): + for mode in itertools.product(*[modes] * 3): m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) From aea8869549c110c46afbe75ed2e162b702d3d106 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:09:16 +0530 Subject: [PATCH 119/294] CI: Add WoA validation setup to windows.yml --- .github/workflows/windows.yml | 74 +++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e760e37780a7..18d02081fd67 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -85,6 +85,80 @@ jobs: run: | spin test -- --timeout=600 --durations=10 + python64bit_openblas_winarm64: + name: arm64, LPARM64 OpenBLAS + runs-on: windows-11-arm + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + strategy: + fail-fast: false + matrix: + compiler-pyversion: + - ["MSVC", "3.11"] + - ["Clang-cl", "3.14t-dev"] + + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: ${{ matrix.compiler-pyversion[1] }} + architecture: arm64 + + - name: Setup MSVC + if: matrix.compiler-pyversion[0] == 'MSVC' + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: arm64 + + - name: Install build dependencies from PyPI + run: | + pip install -r requirements/build_requirements.txt + + - name: Install 
pkg-config + run: | + choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV + + - name: Install Clang-cl + if: matrix.compiler-pyversion[0] == 'Clang-cl' + run: | + uses: ./.github/windows_arm64_steps + + - name: Install NumPy (MSVC) + if: matrix.compiler-pyversion[0] == 'MSVC' + run: | + pip install -r requirements/ci_requirements.txt + spin build --with-scipy-openblas=32 -j2 -- --vsenv + + - name: Install NumPy (Clang-cl) + if: matrix.compiler-pyversion[0] == 'Clang-cl' + run: | + "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-arm64.ini -Encoding ascii + pip install -r requirements/ci_requirements.txt + spin build --with-scipy-openblas=32 -j2 -- --vsenv --native-file=$PWD/clang-cl-arm64.ini + + - name: Meson Log + shell: bash + if: ${{ failure() }} + run: | + cat build/meson-logs/meson-log.txt + + - name: Install test dependencies + run: | + python -m pip install -r requirements/test_requirements.txt + python -m pip install threadpoolctl + + - name: Run test suite + run: | + spin test -- --timeout=600 --durations=10 + msvc_python_no_openblas: name: MSVC, ${{ matrix.architecture }} Python , no BLAS runs-on: ${{ matrix.os }} From d8c4f2ba8604fbb0e7f22a5df8ea3a355a53385e Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:12:41 +0530 Subject: [PATCH 120/294] CI: Create action.yml for LLVM Win-ARM64 as reusable blocks --- .github/windows_arm64_steps /action.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/windows_arm64_steps /action.yml diff --git a/.github/windows_arm64_steps /action.yml b/.github/windows_arm64_steps /action.yml new file mode 100644 index 000000000000..86517b6246e7 --- /dev/null +++ b/.github/windows_arm64_steps /action.yml @@ -0,0 +1,16 @@ +name: Build Dependencies(Win-ARM64) +description: 
"Setup LLVM for Win-ARM64 builds" + +runs: + using: "composite" + steps: + - name: Install LLVM + shell: pwsh + run: | + Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe + Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait + echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "CXX=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "FC=flang-new" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + From f02bb58cb634286739a1c5eb9dc35bc0e086efa7 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:16:56 +0530 Subject: [PATCH 121/294] CI: Modify wheel.yml to use clang-cl for Win-ARM64 --- .github/workflows/wheels.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 68352eb1fc7c..ab6bbb618899 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -127,11 +127,9 @@ jobs: with: architecture: 'x86' - - name: Setup MSVC arm64 + - name: Setup LLVM for Windows ARM64 if: ${{ matrix.buildplat[1] == 'win_arm64' }} - uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 - with: - architecture: 'arm64' + uses: ./.github/windows_arm64_steps - name: pkg-config-for-win run: | From fb0dff6061addfa3b7d2cba9ecc4dfedf5dad954 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:32:03 +0530 Subject: [PATCH 122/294] CI: fix action.yml naming --- .github/{windows_arm64_steps => windows_arm64_steps}/action.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{windows_arm64_steps => windows_arm64_steps}/action.yml (100%) diff --git a/.github/windows_arm64_steps /action.yml b/.github/windows_arm64_steps/action.yml similarity 
index 100% rename from .github/windows_arm64_steps /action.yml rename to .github/windows_arm64_steps/action.yml From f36c0daa423244ca6887ab426e3c6149a13a2667 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:40:39 +0530 Subject: [PATCH 123/294] CI: Fix reusable LLVM block --- .github/workflows/windows.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 18d02081fd67..e723b787a5de 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -128,8 +128,7 @@ jobs: - name: Install Clang-cl if: matrix.compiler-pyversion[0] == 'Clang-cl' - run: | - uses: ./.github/windows_arm64_steps + uses: ./.github/windows_arm64_steps - name: Install NumPy (MSVC) if: matrix.compiler-pyversion[0] == 'MSVC' From 03d57303675386998ad005d28c7c3ca7177694d1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 17 Jun 2025 17:49:15 +0200 Subject: [PATCH 124/294] MAINT: Fix some undef warnings (#29216) As noted by Chuck in gh-29138, there are some undef warnings that seem not nice this should fix them. The fact that `NPY_LONG`, etc. are not defined at macro expansion time is a bit of a trap, maybe it would be nice to have CI fail for this... 
--- numpy/_core/src/_simd/_simd.c | 2 +- numpy/_core/src/multiarray/descriptor.c | 2 +- numpy/_core/src/umath/_operand_flag_tests.c | 2 +- numpy/_core/src/umath/_rational_tests.c | 2 +- numpy/_core/src/umath/_struct_ufunc_tests.c | 2 +- numpy/_core/src/umath/_umath_tests.c.src | 2 +- .../_core/tests/examples/limited_api/limited_api_latest.c | 8 ++++---- numpy/f2py/rules.py | 2 +- numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/_core/src/_simd/_simd.c b/numpy/_core/src/_simd/_simd.c index 2f0a5df6375c..d25d7bbf1c38 100644 --- a/numpy/_core/src/_simd/_simd.c +++ b/numpy/_core/src/_simd/_simd.c @@ -88,7 +88,7 @@ PyMODINIT_FUNC PyInit__simd(void) NPY_MTARGETS_CONF_DISPATCH(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) NPY_MTARGETS_CONF_BASELINE(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 5708e5c6ecb7..f520e3c4bceb 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2095,7 +2095,7 @@ static PyMemberDef arraydescr_members[] = { {"alignment", T_PYSSIZET, offsetof(PyArray_Descr, alignment), READONLY, NULL}, {"flags", -#if NPY_ULONGLONG == NPY_UINT64 +#if NPY_SIZEOF_LONGLONG == 8 T_ULONGLONG, offsetof(PyArray_Descr, flags), READONLY, NULL}, #else #error Assuming long long is 64bit, if not replace with getter function. 
diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index 9747b7946512..5cdff6220280 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -77,7 +77,7 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) ((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK; PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index a95c89b373df..d257bc22d051 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1355,7 +1355,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common denominator of two integers"); GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers"); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index 8edbdc00b6f3..56c4be117e44 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -157,7 +157,7 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyDict_SetItemString(d, "add_triplet", add_triplet); Py_DECREF(add_triplet); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index 9f2818d14526..845f51ebc94f 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -944,7 +944,7 @@ 
PyMODINIT_FUNC PyInit__umath_tests(void) { // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/numpy/_core/tests/examples/limited_api/limited_api_latest.c index 13668f2f0ebf..92d83ea977a1 100644 --- a/numpy/_core/tests/examples/limited_api/limited_api_latest.c +++ b/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -1,11 +1,11 @@ -#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 - # error "Py_LIMITED_API not defined to Python major+minor version" -#endif - #include #include #include +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + static PyModuleDef moduledef = { .m_base = PyModuleDef_HEAD_INIT, .m_name = "limited_api_latest" diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 667ef287f92b..68c49e60028e 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -286,7 +286,7 @@ #initcommonhooks# #interface_usercode# -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m , #gil_used#); #endif diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index b66672a43e21..25866f1a40ec 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -223,7 +223,7 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif From 3f99204758206967cce4af0f460de4946443f62d Mon Sep 
17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 17 Jun 2025 19:30:32 +0200 Subject: [PATCH 125/294] MAINT: bump `mypy` to `1.16.1` (#29219) --- environment.yml | 2 +- requirements/test_requirements.txt | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/environment.yml b/environment.yml index 770a83218133..75c6626abaf8 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.16.0 + - mypy=1.16.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 17260753db4a..e50919ef4f7b 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,9 +12,8 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.16.0; platform_python_implementation != "PyPy" +mypy==1.16.1; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer tzdata - From f6a17f099ba3ae2acabc288bf2297fd821725ffc Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 17 Jun 2025 19:31:38 +0200 Subject: [PATCH 126/294] TYP: Workaround for a mypy issue in ``ndarray.__iter__`` (#29218) --- numpy/__init__.pyi | 12 ++++++++---- numpy/typing/tests/data/reveal/ndarray_misc.pyi | 10 ++++++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 272e52f88e83..ce879e208e49 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -807,6 +807,7 @@ _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) _FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) _IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, 
covariant=True) +_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) _NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] _NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] @@ -2572,10 +2573,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... - @overload # == 1-d & object_ - def __iter__(self: ndarray[tuple[int], dtype[object_]], /) -> Iterator[Any]: ... - @overload # == 1-d - def __iter__(self: ndarray[tuple[int], dtype[_ScalarT]], /) -> Iterator[_ScalarT]: ... + # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. + @overload + def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... @overload # >= 2-d def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... @overload # ?-d diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 465ce7679b49..4cbb90621ca9 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,6 +6,7 @@ function-based counterpart in `../from_numeric.py`. 
""" +from collections.abc import Iterator import ctypes as ct import operator from types import ModuleType @@ -29,6 +30,10 @@ AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] AR_V: npt.NDArray[np.void] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + ctypes_obj = AR_f8.ctypes assert_type(AR_f8.__dlpack__(), CapsuleType) @@ -235,3 +240,8 @@ assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) assert_type(f8.__array_namespace__(), ModuleType) assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D From 32f4afaad7cb035e1b46987285d34c27c57dbe57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20Gro=C3=9F?= Date: Tue, 17 Jun 2025 19:34:26 +0200 Subject: [PATCH 127/294] ENH: improve Timsort with powersort merge-policy (#29208) Implement the improved merge policy for Timsort, as developed by Munro and Wild. Benchmarks show a significant improvement in performance. --- numpy/_core/src/npysort/timsort.cpp | 110 +++++++++++++--------------- 1 file changed, 51 insertions(+), 59 deletions(-) diff --git a/numpy/_core/src/npysort/timsort.cpp b/numpy/_core/src/npysort/timsort.cpp index 0f0f5721e7cf..9ecf88c0aeb9 100644 --- a/numpy/_core/src/npysort/timsort.cpp +++ b/numpy/_core/src/npysort/timsort.cpp @@ -39,8 +39,9 @@ #include #include -/* enough for 32 * 1.618 ** 128 elements */ -#define TIMSORT_STACK_SIZE 128 +/* enough for 32 * 1.618 ** 128 elements. 
+ If powersort was used in all cases, 90 would suffice, as 32 * 2 ** 90 >= 32 * 1.618 ** 128 */ +#define RUN_STACK_SIZE 128 static npy_intp compute_min_run(npy_intp num) @@ -58,6 +59,7 @@ compute_min_run(npy_intp num) typedef struct { npy_intp s; /* start pointer */ npy_intp l; /* length */ + int power; /* node "level" for powersort merge strategy */ } run; /* buffer for argsort. Declared here to avoid multiple declarations. */ @@ -383,60 +385,51 @@ merge_at_(type *arr, const run *stack, const npy_intp at, buffer_ *buffer) return 0; } -template +/* See https://github.com/python/cpython/blob/ea23c897cd25702e72a04e06664f6864f07a7c5d/Objects/listsort.txt +* for a detailed explanation. +* In CPython, *num* is called *n*, but we changed it for consistency with the NumPy implementation. +*/ static int -try_collapse_(type *arr, run *stack, npy_intp *stack_ptr, buffer_ *buffer) +powerloop(npy_intp s1, npy_intp n1, npy_intp n2, npy_intp num) { - int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = merge_at_(arr, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = merge_at_(arr, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } + int result = 0; + npy_intp a = 2 * s1 + n1; /* 2*a */ + npy_intp b = a + n1 + n2; /* 2*b */ + for (;;) { + ++result; + if (a >= num) { /* both quotient bits are 1 */ + a -= num; + b -= num; } - else if (1 < top && B <= C) { - ret = merge_at_(arr, stack, top - 2, buffer); + else if (b >= num) { /* a/num bit is 0, b/num bit is 1 */ + break; + } + a <<= 1; + b <<= 1; + } + return result; +} +template +static int +found_new_run_(type *arr, run 
*stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_ *buffer) +{ + int ret; + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = merge_at_(arr, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -491,7 +484,7 @@ timsort_(void *start, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_ buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; buffer.size = 0; stack_ptr = 0; @@ -499,15 +492,14 @@ timsort_(void *start, npy_intp num) for (l = 0; l < num;) { n = count_run_((type *)start, l, num, minrun); + ret = found_new_run_((type *)start, stack, &stack_ptr, n, num, &buffer); + if (NPY_UNLIKELY(ret < 0)) + goto cleanup; + + // Push the new run onto the stack. 
stack[stack_ptr].s = l; stack[stack_ptr].l = n; ++stack_ptr; - ret = try_collapse_((type *)start, stack, &stack_ptr, &buffer); - - if (NPY_UNLIKELY(ret < 0)) { - goto cleanup; - } - l += n; } @@ -897,7 +889,7 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_intp buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; buffer.size = 0; stack_ptr = 0; @@ -1371,7 +1363,7 @@ string_timsort_(void *start, npy_intp num, void *varr) size_t len = elsize / sizeof(type); int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; string_buffer_ buffer; /* Items that have zero size don't make sense to sort */ @@ -1800,7 +1792,7 @@ string_atimsort_(void *start, npy_intp *tosort, npy_intp num, void *varr) size_t len = elsize / sizeof(type); int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_intp buffer; /* Items that have zero size don't make sense to sort */ @@ -2253,7 +2245,7 @@ npy_timsort(void *start, npy_intp num, void *varr) PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_char buffer; /* Items that have zero size don't make sense to sort */ @@ -2689,7 +2681,7 @@ npy_atimsort(void *start, npy_intp *tosort, npy_intp num, void *varr) PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_intp buffer; /* Items that have zero size don't make sense to sort */ From d1c67573569885087a253120c1a9f2caf3ccf084 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Tue, 17 Jun 2025 14:56:21 -0400 Subject: [PATCH 128/294] ENH: Detect CPU features on OpenBSD ARM and PowerPC64 Also while looking at this I noticed due to a compiler 
warning that npy__cpu_init_features_linux() was not enabled on FreeBSD with the original commit c47e9621ebf76f8085ff5ec8b01c07921d14f6a7 thus the code was doing nothing on FreeBSD ARM. --- numpy/_core/src/common/npy_cpu_features.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index f15f636cdb1e..617225ec9eff 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -562,7 +562,7 @@ npy__cpu_init_features(void) #elif defined(NPY_CPU_PPC64) || defined(NPY_CPU_PPC64LE) -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __FreeBSD__ #include // defines PPC_FEATURE_HAS_VSX #endif @@ -585,7 +585,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); if ((hwcap & PPC_FEATURE_HAS_VSX) == 0) @@ -612,7 +612,7 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VSX2] = (hwcap & PPC_FEATURE2_ARCH_2_07) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX3] = (hwcap & PPC_FEATURE2_ARCH_3_00) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX4] = (hwcap & PPC_FEATURE2_ARCH_3_1) != 0; -// TODO: AIX, OpenBSD +// TODO: AIX #else npy__cpu_have[NPY_CPU_FEATURE_VSX] = 1; #if defined(NPY_CPU_PPC64LE) || defined(NPY_HAVE_VSX2) @@ -698,7 +698,7 @@ npy__cpu_init_features_arm8(void) npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = 1; } -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* * we aren't sure of what kind kernel or clib we deal with * so we play it safe @@ -709,7 +709,7 @@ npy__cpu_init_features_arm8(void) #if defined(__linux__) __attribute__((weak)) 
unsigned long getauxval(unsigned long); // linker should handle it #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) || defined(__OpenBSD__) __attribute__((weak)) int elf_aux_info(int, void *, int); // linker should handle it static unsigned long getauxval(unsigned long k) @@ -807,7 +807,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#ifdef __linux__ +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) if (npy__cpu_init_features_linux()) return; #endif From b6a6740e106277f4b1081210bbd158fed3e0d20c Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Wed, 18 Jun 2025 18:16:30 +0530 Subject: [PATCH 129/294] CI: Add conditions to check hash of LLVM package --- .github/windows_arm64_steps/action.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/windows_arm64_steps/action.yml b/.github/windows_arm64_steps/action.yml index 86517b6246e7..8ecb3b8a0cdd 100644 --- a/.github/windows_arm64_steps/action.yml +++ b/.github/windows_arm64_steps/action.yml @@ -4,10 +4,16 @@ description: "Setup LLVM for Win-ARM64 builds" runs: using: "composite" steps: - - name: Install LLVM + - name: Install LLVM with checksum verification shell: pwsh run: | Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe + $expectedHash = "92f69a1134e32e54b07d51c6e24d9594852f6476f32c3d70471ae00fffc2d462" + $fileHash = (Get-FileHash -Path "LLVM-woa64.exe" -Algorithm SHA256).Hash + if ($fileHash -ne $expectedHash) { + Write-Error "Checksum verification failed. The downloaded file may be corrupted or tampered with." 
+ exit 1 + } Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append From d52b36ee0b8bf3e7ca9eec19d8c4b41ff2c04f59 Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 18 Jun 2025 19:32:32 +0200 Subject: [PATCH 130/294] strides comparison performance fix, compare discussion #29179 (#29188) --- numpy/_core/src/umath/matmul.c.src | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 02c4fde56bf2..6f54aeb4d968 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -27,6 +27,8 @@ ***************************************************************************** */ +#define ABS(x) ((x) < 0 ? -(x) : (x)) + #if defined(HAVE_CBLAS) /* * -1 to be conservative, in case blas internally uses a for loop with an @@ -554,9 +556,9 @@ NPY_NO_EXPORT void } else { /* matrix @ matrix * copy if not blasable, see gh-12365 & gh-23588 */ - npy_bool i1_transpose = is1_m < is1_n, - i2_transpose = is2_n < is2_p, - o_transpose = os_m < os_p; + npy_bool i1_transpose = ABS(is1_m) < ABS(is1_n), + i2_transpose = ABS(is2_n) < ABS(is2_p), + o_transpose = ABS(os_m) < ABS(os_p); npy_intp tmp_is1_m = i1_transpose ? sz : sz*dn, tmp_is1_n = i1_transpose ? 
sz*dm : sz, From 2b0eda38f099203bb72572a8b640b37165fc7dc5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 17 Jun 2025 17:49:17 +0200 Subject: [PATCH 131/294] TYP: Support iteration of ``StringDType`` arrays --- numpy/__init__.pyi | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ce879e208e49..bcbe95accee6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2577,11 +2577,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. # This way the bug only occurs for 9-D arrays, which are probably not very common. @overload - def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ... + def __iter__( + self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / + ) -> Iterator[Any]: ... @overload # == 1-d & dtype[T \ object_] def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + @overload # == 1-d & StringDType + def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... + def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... @overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... 
From f15a116ee12ec539c7ed1fafb3a0abfb82005e0f Mon Sep 17 00:00:00 2001 From: Developer-Ecosystem-Engineering <65677710+Developer-Ecosystem-Engineering@users.noreply.github.com> Date: Wed, 18 Jun 2025 13:00:27 -0700 Subject: [PATCH 132/294] BUG: Address interaction between SME and FPSR (#29223) * BUG: Address interaction between SME and FPSR This is intended to resolve https://github.com/numpy/numpy/issues/28687 The root cause is an interaction between Arm Scalable Matrix Extension (SME) and the floating point status register (FPSR). As noted in Arm docs for FPSR, "On entry to or exit from Streaming SVE mode, FPSR.{IOC, DZC, OFC, UFC, IXC, IDC, QC} are set to 1 and the remaining bits are set to 0". This means that floating point status flags are all raised when SME is used, regardless of values or operations performed. These are manifesting now because Apple Silicon M4 supports SME and macOS 15.4 enables SME codepaths for Accelerate BLAS / LAPACK. However, SME / FPSR behavior is not specific to Apple Silicon M4 and will occur on non-Apple chips using SME as well. Changes add compile and runtime checks to determine whether BLAS / LAPACK might use SME (macOS / Accelerate only at the moment). If so, special handling of floating-point error (FPE) is added, which includes: - clearing FPE after some BLAS calls - short-circuiting FPE read after some BLAS calls All tests pass Performance is similar Another approach would have been to wrap all BLAS / LAPACK calls with save / restore FPE. However, it added a lot of overhead for the inner loops that utilize BLAS / LAPACK. Some benchmarks were 8x slower. 
* add blas_supports_fpe and ifdef check Address the linker & linter failures --- numpy/_core/meson.build | 1 + numpy/_core/src/common/blas_utils.c | 134 ++++++++++++++++++ numpy/_core/src/common/blas_utils.h | 30 ++++ numpy/_core/src/common/cblasfuncs.c | 3 +- numpy/_core/src/multiarray/multiarraymodule.c | 5 + numpy/_core/src/umath/matmul.c.src | 36 ++++- numpy/_core/tests/test_multiarray.py | 6 + numpy/testing/_private/utils.py | 10 ++ 8 files changed, 217 insertions(+), 8 deletions(-) create mode 100644 numpy/_core/src/common/blas_utils.c create mode 100644 numpy/_core/src/common/blas_utils.h diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index a4d2050122c6..cd46a20b0246 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1117,6 +1117,7 @@ src_multiarray_umath_common = [ ] if have_blas src_multiarray_umath_common += [ + 'src/common/blas_utils.c', 'src/common/cblasfuncs.c', 'src/common/python_xerbla.c', ] diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c new file mode 100644 index 000000000000..aaf976ed70e4 --- /dev/null +++ b/numpy/_core/src/common/blas_utils.c @@ -0,0 +1,134 @@ +#include +#include +#include + +#ifdef __APPLE__ +#include +#endif + +#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN +#include "numpy/npy_math.h" // npy_get_floatstatus_barrier +#include "blas_utils.h" + +#if NPY_BLAS_CHECK_FPE_SUPPORT + +/* Return whether we're running on macOS 15.4 or later + */ +static inline bool +is_macOS_version_15_4_or_later(void){ +#if !defined(__APPLE__) + return false; +#else + char *osProductVersion = NULL; + size_t size = 0; + bool ret = false; + + // Query how large OS version string should be + if(-1 == sysctlbyname("kern.osproductversion", NULL, &size, NULL, 0)){ + goto cleanup; + } + + osProductVersion = malloc(size + 1); + + // Get the OS version string + if(-1 == sysctlbyname("kern.osproductversion", osProductVersion, &size, NULL, 0)){ + goto cleanup; + } + + 
osProductVersion[size] = '\0'; + + // Parse the version string + int major = 0, minor = 0; + if(2 > sscanf(osProductVersion, "%d.%d", &major, &minor)) { + goto cleanup; + } + + if(major >= 15 && minor >= 4){ + ret = true; + } + +cleanup: + if(osProductVersion){ + free(osProductVersion); + } + + return ret; +#endif +} + +/* ARM Scalable Matrix Extension (SME) raises all floating-point error flags + * when it's used regardless of values or operations. As a consequence, + * when SME is used, all FPE state is lost and special handling is needed. + * + * For NumPy, SME is not currently used directly, but can be used via + * BLAS / LAPACK libraries. This function does a runtime check for whether + * BLAS / LAPACK can use SME and special handling around FPE is required. + */ +static inline bool +BLAS_can_use_ARM_SME(void) +{ +#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) + // ARM SME can be used by Apple's Accelerate framework for BLAS / LAPACK + // - macOS 15.4+ + // - Apple silicon M4+ + + // Does OS / Accelerate support ARM SME? + if(!is_macOS_version_15_4_or_later()){ + return false; + } + + // Does hardware support SME? + int has_SME = 0; + size_t size = sizeof(has_SME); + if(-1 == sysctlbyname("hw.optional.arm.FEAT_SME", &has_SME, &size, NULL, 0)){ + return false; + } + + if(has_SME){ + return true; + } +#endif + + // default assume SME is not used + return false; +} + +/* Static variable to cache runtime check of BLAS FPE support. 
+ */ +static bool blas_supports_fpe = true; + +#endif // NPY_BLAS_CHECK_FPE_SUPPORT + + +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + return blas_supports_fpe; +#else + return true; +#endif +} + +NPY_VISIBILITY_HIDDEN void +npy_blas_init(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + blas_supports_fpe = !BLAS_can_use_ARM_SME(); +#endif +} + +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + if(!blas_supports_fpe){ + // BLAS does not support FPE and we need to return FPE state. + // Instead of clearing and then grabbing state, just return + // that no flags are set. + return 0; + } +#endif + char *param = NULL; + return npy_get_floatstatus_barrier(param); +} diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h new file mode 100644 index 000000000000..8c1437f88899 --- /dev/null +++ b/numpy/_core/src/common/blas_utils.h @@ -0,0 +1,30 @@ +#include + +#include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN + +/* NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check + * for floating-point error (FPE) support in BLAS. + */ +#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) +#define NPY_BLAS_CHECK_FPE_SUPPORT 1 +#else +#define NPY_BLAS_CHECK_FPE_SUPPORT 0 +#endif + +/* Initialize BLAS environment, if needed + */ +NPY_VISIBILITY_HIDDEN void +npy_blas_init(void); + +/* Runtime check if BLAS supports floating-point errors. + * true - BLAS supports FPE and one can rely on them to indicate errors + * false - BLAS does not support FPE. Special handling needed for FPE state + */ +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void); + +/* If BLAS supports FPE, exactly the same as npy_get_floatstatus_barrier(). + * Otherwise, we can't rely on FPE state and need special handling. 
+ */ +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void); diff --git a/numpy/_core/src/common/cblasfuncs.c b/numpy/_core/src/common/cblasfuncs.c index f9d683d812d4..66a215dfeb64 100644 --- a/numpy/_core/src/common/cblasfuncs.c +++ b/numpy/_core/src/common/cblasfuncs.c @@ -12,6 +12,7 @@ #include "numpy/arrayobject.h" #include "numpy/npy_math.h" #include "numpy/ufuncobject.h" +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" #include "common.h" @@ -693,7 +694,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, NPY_END_ALLOW_THREADS; } - int fpes = npy_get_floatstatus_barrier((char *) result); + int fpes = npy_get_floatstatus_after_blas(); if (fpes && PyUFunc_GiveFloatingpointErrors("dot", fpes) < 0) { goto fail; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 7724756ba351..dcfb1226a0ab 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -43,6 +43,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "arraytypes.h" #include "arrayobject.h" #include "array_converter.h" +#include "blas_utils.h" #include "hashdescr.h" #include "descriptor.h" #include "dragon4.h" @@ -4781,6 +4782,10 @@ _multiarray_umath_exec(PyObject *m) { return -1; } +#if NPY_BLAS_CHECK_FPE_SUPPORT + npy_blas_init(); +#endif + #if defined(MS_WIN64) && defined(__GNUC__) PyErr_WarnEx(PyExc_Warning, "Numpy built with MINGW-W64 on Windows 64 bits is experimental, " \ diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 6f54aeb4d968..11e014acec7f 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -16,6 +16,7 @@ +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" /* For TYPE_dot functions */ @@ -122,7 +123,7 @@ static inline void } } -NPY_NO_EXPORT void +static void @name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n, void 
*ip2, npy_intp is2_n, void *op, npy_intp op_m, @@ -158,7 +159,7 @@ NPY_NO_EXPORT void is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@)); } -NPY_NO_EXPORT void +static void @name@_matmul_matrixmatrix(void *ip1, npy_intp is1_m, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_p, void *op, npy_intp os_m, npy_intp os_p, @@ -262,7 +263,7 @@ NPY_NO_EXPORT void * #IS_HALF = 0, 0, 0, 1, 0*13# */ -NPY_NO_EXPORT void +static void @TYPE@_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -320,7 +321,7 @@ NPY_NO_EXPORT void } /**end repeat**/ -NPY_NO_EXPORT void +static void BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -359,7 +360,7 @@ BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, } } -NPY_NO_EXPORT void +static void OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -631,6 +632,11 @@ NPY_NO_EXPORT void #endif } #if @USEBLAS@ && defined(HAVE_CBLAS) +#if NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif if (allocate_buffer) free(tmp_ip12op); #endif } @@ -655,7 +661,7 @@ NPY_NO_EXPORT void * #prefix = c, z, 0# * #USE_BLAS = 1, 1, 0# */ -NPY_NO_EXPORT void +static void @name@_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, void *NPY_UNUSED(ignore)) { @@ -751,6 +757,7 @@ OBJECT_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT# * #DOT = dot*15, dotc*4# * #CHECK_PYERR = 0*18, 1# + * #CHECK_BLAS = 1*2, 0*13, 1*2, 0*2# */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, @@ -774,6 +781,11 @@ NPY_NO_EXPORT void } #endif } +#if 
@CHECK_BLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ @@ -789,7 +801,7 @@ NPY_NO_EXPORT void * #step1 = &oneF, &oneD# * #step0 = &zeroF, &zeroD# */ -NPY_NO_EXPORT void +static void @name@_vecmat_via_gemm(void *ip1, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_m, void *op, npy_intp os_m, @@ -880,6 +892,11 @@ NPY_NO_EXPORT void #endif } } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ @@ -945,5 +962,10 @@ NPY_NO_EXPORT void #endif } } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index b164f1dada3b..81e55deb3daf 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -31,6 +31,7 @@ from numpy.exceptions import AxisError, ComplexWarning from numpy.lib.recfunctions import repack_fields from numpy.testing import ( + BLAS_SUPPORTS_FPE, HAS_REFCOUNT, IS_64BIT, IS_PYPY, @@ -3363,6 +3364,11 @@ def test_dot(self): @pytest.mark.parametrize("dtype", [np.half, np.double, np.longdouble]) @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") def test_dot_errstate(self, dtype): + # Some dtypes use BLAS for 'dot' operation and + # not all BLAS support floating-point errors. 
+ if not BLAS_SUPPORTS_FPE and dtype == np.double: + pytest.skip("BLAS does not support FPE") + a = np.array([1, 1], dtype=dtype) b = np.array([-np.inf, np.inf], dtype=dtype) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index d7ceaeab72cc..65f4059f98fd 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -42,6 +42,7 @@ 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', ] @@ -89,6 +90,15 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +BLAS_SUPPORTS_FPE = True +if platform.system() == 'Darwin' or platform.machine() == 'arm64': + try: + blas = np.__config__.CONFIG['Build Dependencies']['blas'] + if blas['name'] == 'accelerate': + BLAS_SUPPORTS_FPE = False + except KeyError: + pass + HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 IS_MUSL = False From 5cbc0bcebbe031140b63d2ffba06b73e471b43b2 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Tue, 17 Jun 2025 19:57:18 -0400 Subject: [PATCH 133/294] ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. 
--- numpy/_core/src/common/npy_cpu_features.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index f15f636cdb1e..06c82fe41e27 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -846,22 +846,30 @@ npy__cpu_init_features(void) #elif defined(__riscv) && __riscv_xlen == 64 -#include +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) + #include -#ifndef HWCAP_RVV - // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 - #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) + #ifndef HWCAP_RVV + // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 + #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) + #endif #endif static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); - +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) +#ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); +#else + unsigned long hwcap; + elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)); +#endif if (hwcap & COMPAT_HWCAP_ISA_V) { npy__cpu_have[NPY_CPU_FEATURE_RVV] = 1; } +#endif } /*********** Unsupported ARCH ***********/ From 7a74e12558789ecf0252dad56a77560db7411d13 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 19 Jun 2025 01:18:27 -0600 Subject: [PATCH 134/294] BUG: avoid negating unsigned integers in resize implementation (#29230) The negation of an unsigned int underflows and creates a large positive repeats, which leads to allocations failures and/or swapping. 
--- numpy/_core/fromnumeric.py | 3 ++- numpy/_core/tests/test_numeric.py | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 73dcd1ddc11d..e20d774d014d 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -1607,7 +1607,8 @@ def resize(a, new_shape): # First case must zero fill. The second would have repeats == 0. return np.zeros_like(a, shape=new_shape) - repeats = -(-new_size // a.size) # ceil division + # ceiling division without negating new_size + repeats = (new_size + a.size - 1) // a.size a = concatenate((a,) * repeats)[:new_size] return reshape(a, new_shape) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 8a72e4bfa65d..65da65ddc9f9 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -79,6 +79,13 @@ def test_negative_resize(self): with pytest.raises(ValueError, match=r"negative"): np.resize(A, new_shape=new_shape) + def test_unsigned_resize(self): + # ensure unsigned integer sizes don't lead to underflows + for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]: + arr = np.array([[23, 95], [66, 37]]) + assert_array_equal(np.resize(arr, dt_pair[0](1)), + np.resize(arr, dt_pair[1](1))) + def test_subclass(self): class MyArray(np.ndarray): __array_priority__ = 1. From 35079afa808ea0fc49bb13c878150ef20c4f64fa Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 19 Jun 2025 14:21:05 +0200 Subject: [PATCH 135/294] TST: Fix test that uses unininitialized memory (#29232) Tests should avoid this generally, this one is worse, since it can even fail due to warnings. 
--- numpy/_core/tests/test_ufunc.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 21ebc02c2625..a1cd63aec523 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2123,9 +2123,9 @@ class ArrayPriorityMinus1000b(ArrayPriorityBase): class ArrayPriorityMinus2000(ArrayPriorityBase): __array_priority__ = -2000 - x = ArrayPriorityMinus1000(2) - xb = ArrayPriorityMinus1000b(2) - y = ArrayPriorityMinus2000(2) + x = np.ones(2).view(ArrayPriorityMinus1000) + xb = np.ones(2).view(ArrayPriorityMinus1000b) + y = np.ones(2).view(ArrayPriorityMinus2000) assert np.add(x, y) is ArrayPriorityMinus1000 assert np.add(y, x) is ArrayPriorityMinus1000 From 4575abf725d9f14af73ee0442b05c01d0d46a7c1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 19 Jun 2025 16:43:25 +0200 Subject: [PATCH 136/294] MAINT: bump ``ruff`` to ``0.12.0`` (#29220) --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 75c6626abaf8..17de8d3eeb5e 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - ruff=0.11.13 + - ruff=0.12.0 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 45319571b561..05319a9bdb8a 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,3 +1,3 @@ # keep in sync with `environment.yml` -ruff==0.11.13 +ruff==0.12.0 GitPython>=3.1.30 From 7b30ce7432d0b42bb805fbf5575b0d0ba5be98ab Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 19 Jun 2025 16:47:49 +0200 Subject: [PATCH 137/294] BUG: Enforce integer limitation in concatenate (#29231) * BUG: Enforce integer limitation in concatenate 
Concatenate internals only deal with integer many arrays, that should be fine in practice, but a SystemError (or in principle maybe also a harder crash?) is not really. * skip 32bit systems --- numpy/_core/src/multiarray/multiarraymodule.c | 11 +++++++++-- numpy/_core/tests/test_shape_base.py | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index dcfb1226a0ab..d4766b5af7b4 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -669,10 +669,17 @@ PyArray_ConcatenateInto(PyObject *op, } /* Convert the input list into arrays */ - narrays = PySequence_Size(op); - if (narrays < 0) { + Py_ssize_t narrays_true = PySequence_Size(op); + if (narrays_true < 0) { return NULL; } + else if (narrays_true > NPY_MAX_INT) { + PyErr_Format(PyExc_ValueError, + "concatenate() only supports up to %d arrays but got %zd.", + NPY_MAX_INT, narrays_true); + return NULL; + } + narrays = (int)narrays_true; arrays = PyArray_malloc(narrays * sizeof(arrays[0])); if (arrays == NULL) { PyErr_NoMemory(); diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index f7b944be08b7..8de24278fc5d 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -1,3 +1,5 @@ +import sys + import pytest import numpy as np @@ -29,6 +31,7 @@ assert_raises, assert_raises_regex, ) +from numpy.testing._private.utils import requires_memory class TestAtleast1d: @@ -290,6 +293,17 @@ def test_exceptions(self): # No arrays to concatenate raises ValueError assert_raises(ValueError, concatenate, ()) + @pytest.mark.slow + @pytest.mark.skipif(sys.maxsize < 2**32, reason="only problematic on 64bit platforms") + @requires_memory(2 * np.iinfo(np.intc).max) + def test_huge_list_error(self): + a = np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg 
= fr"concatenate\(\) only supports up to {max_int} arrays but got {max_int + 1}." + with pytest.raises(ValueError, match=msg): + np.concatenate(arrs) + def test_concatenate_axis_None(self): a = np.arange(4, dtype=np.float64).reshape((2, 2)) b = list(range(3)) From 1da34ea0daaedb94d2b673c70a47f4a57883a713 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 07:43:13 +0200 Subject: [PATCH 138/294] DEP: Deprecate setting the strides attribute of a numpy array (#28925) Deprecate setting strides (mutating) on an array. --------- Co-authored-by: Charles Harris Co-authored-by: Sebastian Berg Co-authored-by: Joren Hammudoglu --- .../upcoming_changes/28925.deprecation.rst | 9 +++++ numpy/__init__.pyi | 3 +- numpy/_core/src/multiarray/getset.c | 22 ++++++++---- numpy/_core/tests/test_api.py | 3 +- numpy/_core/tests/test_deprecations.py | 7 ++++ numpy/_core/tests/test_half.py | 6 ++-- numpy/_core/tests/test_multiarray.py | 36 ++++++++++++------- numpy/_core/tests/test_nditer.py | 4 +-- numpy/_core/tests/test_regression.py | 23 ++++++------ numpy/lib/_npyio_impl.py | 2 +- 10 files changed, 75 insertions(+), 40 deletions(-) create mode 100644 doc/release/upcoming_changes/28925.deprecation.rst diff --git a/doc/release/upcoming_changes/28925.deprecation.rst b/doc/release/upcoming_changes/28925.deprecation.rst new file mode 100644 index 000000000000..a421839394fa --- /dev/null +++ b/doc/release/upcoming_changes/28925.deprecation.rst @@ -0,0 +1,9 @@ +Setting the ``strides`` attribute is deprecated +----------------------------------------------- +Setting the strides attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a new view (no copy) via: +* `np.lib.stride_tricks.strided_window_view` if applicable, +* `np.lib.stride_tricks.as_strided` for the general case, +* or the `np.ndarray` constructor (``buffer`` is the original array) for a light-weight version. 
+ diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index bcbe95accee6..2e9dea06ce6a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -217,7 +217,7 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, TypeVar +from typing_extensions import CapsuleType, TypeVar, deprecated from numpy import ( char, @@ -2169,6 +2169,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def shape(self, value: _ShapeLike) -> None: ... @property def strides(self) -> _Shape: ... + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") @strides.setter def strides(self, value: _ShapeLike) -> None: ... def byteswap(self, inplace: builtins.bool = ...) -> Self: ... diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 8482b6006e3e..48da52dd3178 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -85,7 +85,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) /* Free old dimensions and strides */ npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = nd; - ((PyArrayObject_fields *)self)->dimensions = _dimensions; + ((PyArrayObject_fields *)self)->dimensions = _dimensions; ((PyArrayObject_fields *)self)->strides = _dimensions + nd; if (nd) { @@ -95,7 +95,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) } else { /* Free old dimensions and strides */ - npy_free_cache_dim_array(self); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = 0; ((PyArrayObject_fields *)self)->dimensions = NULL; ((PyArrayObject_fields *)self)->strides = NULL; @@ -116,6 +116,19 @@ array_strides_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) 
static int array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) { + if (obj == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array strides"); + return -1; + } + + /* Deprecated NumPy 2.4, 2025-05-11 */ + if (DEPRECATE("Setting the strides on a NumPy array has been deprecated in NumPy 2.4.\n" + "As an alternative, you can create a new view using np.lib.stride_tricks.as_strided." + ) < 0 ) { + return -1; + } + PyArray_Dims newstrides = {NULL, -1}; PyArrayObject *new; npy_intp numbytes = 0; @@ -124,11 +137,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) npy_intp upper_offset = 0; Py_buffer view; - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array strides"); - return -1; - } if (!PyArray_OptionalIntpConverter(obj, &newstrides) || newstrides.len == -1) { PyErr_SetString(PyExc_TypeError, "invalid strides"); diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index d427ac0399a2..bb21d79c472d 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -5,6 +5,7 @@ import numpy as np import numpy._core.umath as ncu from numpy._core._rational_tests import rational +from numpy.lib import stride_tricks from numpy.testing import ( HAS_REFCOUNT, assert_, @@ -558,7 +559,7 @@ def check_copy_result(x, y, ccontig, fcontig, strides=False): def test_contiguous_flags(): a = np.ones((4, 4, 1))[::2, :, :] - a.strides = a.strides[:2] + (-123,) + a = stride_tricks.as_strided(a, strides=a.strides[:2] + (-123,)) b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) def check_contig(a, ccontig, fcontig): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index c4acbf9d2d69..7d4875d6d149 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -406,6 +406,13 @@ def __array_wrap__(self, arr): self.assert_deprecated(lambda: np.negative(test2)) assert test2.called 
+class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): + message = "Setting the .*on a NumPy array has been deprecated.*" + + def test_deprecated_strides_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) + class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index e2d6e6796db4..711c13655b7a 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -21,7 +21,7 @@ class TestHalf: def setup_method(self): # An array of all possible float16 values self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 + self.all_f16 = self.all_f16.view(float16) # NaN value can cause an invalid FP exception if HW is being used with np.errstate(invalid='ignore'): @@ -32,7 +32,7 @@ def setup_method(self): self.nonan_f16 = np.concatenate( (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16.dtype = float16 + self.nonan_f16 = self.nonan_f16.view(float16) self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) @@ -218,7 +218,7 @@ def test_half_values(self): 0x0001, 0x8001, 0x0000, 0x8000, 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 + b = b.view(dtype=float16) assert_equal(a, b) def test_half_rounding(self): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 81e55deb3daf..0faf35c64b98 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -29,6 +29,7 @@ from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib import stride_tricks from numpy.lib.recfunctions import repack_fields from numpy.testing import ( 
BLAS_SUPPORTS_FPE, @@ -382,7 +383,8 @@ def make_array(size, offset, strides): offset=offset * x.itemsize) except Exception as e: raise RuntimeError(e) - r.strides = strides = strides * x.itemsize + with pytest.warns(DeprecationWarning): + r.strides = strides * x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) @@ -392,24 +394,28 @@ def make_array(size, offset, strides): assert_raises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + x = stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): - arr.strides = strides + with pytest.warns(DeprecationWarning): + arr.strides = strides assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + x = stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] - a.strides = 1 - a[::2].strides = 2 + with pytest.warns(DeprecationWarning): + a.strides = 1 + with pytest.warns(DeprecationWarning): + a[::2].strides = 2 # test 0d arr_0d = np.array(0) - arr_0d.strides = () + with pytest.warns(DeprecationWarning): + arr_0d.strides = () assert_raises(TypeError, set_strides, arr_0d, None) def test_fill(self): @@ -3635,7 +3641,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) @@ -3644,7 +3650,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) 
assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel(order='K'), np.arange(2**3)) @@ -3657,7 +3663,7 @@ def test_ravel(self): # 1-element tidy strides test: a = np.array([[1]]) - a.strides = (123, 432) + a = stride_tricks.as_strided(a, strides=(123, 432)) if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) @@ -4546,7 +4552,8 @@ def test_datetime64_byteorder(self): original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]') original_byte_reversed = original.copy(order='K') - original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S') + new_dtype = original_byte_reversed.dtype.newbyteorder('S') + original_byte_reversed = original_byte_reversed.view(dtype=new_dtype) original_byte_reversed.byteswap(inplace=True) new = pickle.loads(pickle.dumps(original_byte_reversed)) @@ -8366,10 +8373,13 @@ def test_padded_struct_array(self): self._check_roundtrip(x3) @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") - def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): # noqa: B008 + def test_relaxed_strides(self, c=stride_tricks.as_strided( # noqa: B008 + np.ones((1, 10, 10), dtype='i8'), # noqa: B008 + strides=(-1, 80, 8) + ) + ): # Note: c defined as parameter so that it is persistent and leak # checks will notice gh-16934 (buffer info cache leak). 
- c.strides = (-1, 80, 8) # strides need to be fixed at export assert_(memoryview(c).strides == (800, 80, 8)) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index a29a49bfb71a..f71130f16331 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -858,7 +858,7 @@ def test_iter_nbo_align_contig(): # Unaligned input a = np.zeros((6 * 4 + 1,), dtype='i1')[1:] - a.dtype = 'f4' + a = a.view('f4') a[:] = np.arange(6, dtype='f4') assert_(not a.flags.aligned) # Without 'aligned', shouldn't copy @@ -1803,7 +1803,7 @@ def test_iter_buffering(): arrays.append(np.arange(10, dtype='f4')) # Unaligned array a = np.zeros((4 * 16 + 1,), dtype='i1')[1:] - a.dtype = 'i4' + a = a.view('i4') a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index fbfa9311a1dc..3d44728aafaa 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -12,6 +12,7 @@ import numpy as np from numpy._utils import asbytes, asunicode from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib.stride_tricks import as_strided from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, @@ -208,7 +209,7 @@ def test_mem_dot(self): # Dummy array to detect bad memory access: _z = np.ones(10) _dummy = np.empty((0, 10)) - z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) + z = as_strided(_z, _dummy.shape, _dummy.strides) np.dot(x, np.transpose(y), out=z) assert_equal(_z, np.ones(10)) # Do the same for the built-in dot: @@ -438,19 +439,16 @@ def test_lexsort_zerolen_custom_strides(self): xs = np.array([], dtype='i8') assert np.lexsort((xs,)).shape[0] == 0 # Works - xs.strides = (16,) + xs = as_strided(xs, strides=(16,)) assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError def test_lexsort_zerolen_custom_strides_2d(self): xs = np.array([], dtype='i8') + xt = as_strided(xs, shape=(0, 2), 
strides=(16, 16)) + assert np.lexsort((xt,), axis=0).shape[0] == 0 - xs.shape = (0, 2) - xs.strides = (16, 16) - assert np.lexsort((xs,), axis=0).shape[0] == 0 - - xs.shape = (2, 0) - xs.strides = (16, 16) - assert np.lexsort((xs,), axis=0).shape[0] == 2 + xt = as_strided(xs, shape=(2, 0), strides=(16, 16)) + assert np.lexsort((xt,), axis=0).shape[0] == 2 def test_lexsort_invalid_axis(self): assert_raises(AxisError, np.lexsort, (np.arange(1),), axis=2) @@ -644,7 +642,7 @@ def test_reshape_order(self): def test_reshape_zero_strides(self): # Issue #380, test reshaping of zero strided arrays a = np.ones(1) - a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + a = as_strided(a, shape=(5,), strides=(0,)) assert_(a.reshape(5, 1).strides[0] == 0) def test_reshape_zero_size(self): @@ -1654,7 +1652,7 @@ def test_eq_string_and_object_array(self): def test_nonzero_byteswap(self): a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) - a.dtype = np.float32 + a = a.view(np.float32) assert_equal(a.nonzero()[0], [1]) a = a.byteswap() a = a.view(a.dtype.newbyteorder()) @@ -1878,7 +1876,8 @@ def test_alignment_update(self): # Check that alignment flag is updated on stride setting a = np.arange(10) assert_(a.flags.aligned) - a.strides = 3 + with pytest.warns(DeprecationWarning): + a.strides = 3 assert_(not a.flags.aligned) def test_ticket_1770(self): diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index f284eeb74834..36ead97a1aae 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1730,7 +1730,7 @@ def fromregex(file, regexp, dtype, encoding=None): # re-interpret as a single-field structured array. 
newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) - output.dtype = dtype + output = output.view(dtype) else: output = np.array(seq, dtype=dtype) From 20d034fff6931b97780e9f22172309d81a5e8322 Mon Sep 17 00:00:00 2001 From: Koki Watanabe <56009584+math-hiyoko@users.noreply.github.com> Date: Fri, 20 Jun 2025 22:56:31 +0900 Subject: [PATCH 139/294] ENH: np.unique: support hash based unique for string dtype (#28767) * Support NPY_STRING, NPY_UNICODE * unique for NPY_STRING and NPY_UNICODE * fix construct array * remove unneccessary include * refactor * refactoring * comment * feature: unique for NPY_VSTRING * refactoring * remove unneccessary include * add test * add error message * linter * linter * reserve bucket * remove emoji from testcase * fix testcase * remove error * fix testcase * fix testcase name * use basic_string * fix testcase * add ValueError * fix testcase * fix memory error * remove multibyte char * refactoring * add multibyte char * refactoring * fix memory error * fix GIL * fix strlen * remove PyArray_GETPTR1 * refactoring * refactoring * use optional * refactoring * refactoring * refactoring * refactoring * fix comment * linter * add doc * DOC: fix * DOC: fix format * MNT: refactoring * MNT: refactoring * ENH: Store pointers to strings in the set instead of the strings themselves. 
* FIX: length in memcmp

* ENH: refactoring

* DOC: 49sec -> 34sec

* Update numpy/lib/_arraysetops_impl.py

Co-authored-by: Nathan Goldbaum

* DOC: Mention that hash-based np.unique returns unsorted strings

* ENH: support medium and long vstrings

* FIX: comment

* ENH: use RAII wrapper

* FIX: error handling of string packing

* FIX: error handling of string packing

* FIX: change default bucket size

* FIX: include

* FIX: cast

* ENH: support equal_nan=False

* FIX: function equal

* FIX: check the case if pack_status doesn't return NULL

* FIX: check the case if pack_status doesn't return NULL

* FIX: stderr

* ENH: METH_VARARGS -> METH_FASTCALL

* FIX: log

* FIX: release allocator

* FIX: comment

* FIX: delete log

* ENH: implemented FNV-1a as hash function

* bool -> npy_bool

* FIX: cast

* 34sec -> 35.1sec

* fix: lint

* fix: cast using const void *

* fix: fix fnv1a hash

* fix: lint

* 35.1sec -> 33.5sec

* enh: define macro HASH_TABLE_INITIAL_BUCKETS

* enh: error handling of NpyString_load

* enh: delete comments on GIL

* fix: PyErr_SetString when NpyString_load failed

* fix: PyErr_SetString -> npy_gil_error

---------

Co-authored-by: Nathan Goldbaum
---
 doc/release/upcoming_changes/28767.change.rst | 10 +
 .../upcoming_changes/28767.performance.rst | 10 +
 numpy/_core/meson.build | 1 +
 numpy/_core/src/multiarray/fnv.c | 85 ++++
 numpy/_core/src/multiarray/fnv.h | 26 ++
 numpy/_core/src/multiarray/multiarraymodule.c | 2 +-
 numpy/_core/src/multiarray/unique.cpp | 368 ++++++++++++++----
 numpy/_core/src/multiarray/unique.h | 3 +-
 numpy/lib/_arraysetops_impl.py | 3 +-
 numpy/lib/tests/test_arraysetops.py | 186 ++++++++-
 10 files changed, 605 insertions(+), 89 deletions(-)
 create mode 100644 doc/release/upcoming_changes/28767.change.rst
 create mode 100644 doc/release/upcoming_changes/28767.performance.rst
 create mode 100644 numpy/_core/src/multiarray/fnv.c
 create mode 100644 numpy/_core/src/multiarray/fnv.h

diff --git a/doc/release/upcoming_changes/28767.change.rst
b/doc/release/upcoming_changes/28767.change.rst new file mode 100644 index 000000000000..ec173c3672b0 --- /dev/null +++ b/doc/release/upcoming_changes/28767.change.rst @@ -0,0 +1,10 @@ +``unique_values`` for string dtypes may return unsorted data +------------------------------------------------------------ +np.unique now supports hash‐based duplicate removal for string dtypes. +This enhancement extends the hash-table algorithm to byte strings ('S'), +Unicode strings ('U'), and the experimental string dtype ('T', StringDType). +As a result, calling np.unique() on an array of strings will use +the faster hash-based method to obtain unique values. +Note that this hash-based method does not guarantee that the returned unique values will be sorted. +This also works for StringDType arrays containing None (missing values) +when using equal_nan=True (treating missing values as equal). diff --git a/doc/release/upcoming_changes/28767.performance.rst b/doc/release/upcoming_changes/28767.performance.rst new file mode 100644 index 000000000000..ef8ac1c3a45d --- /dev/null +++ b/doc/release/upcoming_changes/28767.performance.rst @@ -0,0 +1,10 @@ +Performance improvements to ``np.unique`` for string dtypes +----------------------------------------------------------- +The hash-based algorithm for unique extraction provides +an order-of-magnitude speedup on large string arrays. +In an internal benchmark with about 1 billion string elements, +the hash-based np.unique completed in roughly 33.5 seconds, +compared to 498 seconds with the sort-based method +– about 15× faster for unsorted unique operations on strings. +This improvement greatly reduces the time to find unique values +in very large string datasets. 
diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index cd46a20b0246..6098986618e4 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1207,6 +1207,7 @@ src_multiarray = multiarray_gen_headers + [ # Remove this `arm64_exports.c` file once scipy macos arm64 build correctly # links to the arm64 npymath library, see gh-22673 'src/npymath/arm64_exports.c', + 'src/multiarray/fnv.c', ] src_umath = umath_gen_headers + [ diff --git a/numpy/_core/src/multiarray/fnv.c b/numpy/_core/src/multiarray/fnv.c new file mode 100644 index 000000000000..2b7848519e61 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.c @@ -0,0 +1,85 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include +#include "numpy/npy_common.h" +#include "fnv.h" + + +#define FNV1A_32_INIT ((npy_uint32)0x811c9dc5) +#define FNV1A_64_INIT ((npy_uint64)0xcbf29ce484222325ULL) + +/* + Compute a 32-bit FNV-1a hash of buffer + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_32a.c +*/ +npy_uint32 +npy_fnv1a_32(const void *buf, size_t len, npy_uint32 hval) +{ + const unsigned char *bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint32)*bp++; + + /* multiply by the 32 bit FNV magic prime */ + /* hval *= 0x01000193; */ + hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24); + } + + return hval; +} + +/* + Compute a 64-bit FNV-1a hash of the given data + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_64a.c +*/ +npy_uint64 +npy_fnv1a_64(const void *buf, size_t len, npy_uint64 hval) +{ + const unsigned char 
*bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint64)*bp++; + + /* multiply by the 64 bit FNV magic prime */ + /* hval *= 0x100000001b3ULL; */ + hval += (hval << 1) + (hval << 4) + (hval << 5) + + (hval << 7) + (hval << 8) + (hval << 40); + } + + return hval; +} + +/* + * Compute a size_t FNV-1a hash of the given data + * This will use 32-bit or 64-bit hash depending on the size of size_t + */ +size_t +npy_fnv1a(const void *buf, size_t len) +{ +#if NPY_SIZEOF_SIZE_T == 8 + return (size_t)npy_fnv1a_64(buf, len, FNV1A_64_INIT); +#else /* NPY_SIZEOF_SIZE_T == 4 */ + return (size_t)npy_fnv1a_32(buf, len, FNV1A_32_INIT); +#endif +} diff --git a/numpy/_core/src/multiarray/fnv.h b/numpy/_core/src/multiarray/fnv.h new file mode 100644 index 000000000000..c76f54a645b9 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.h @@ -0,0 +1,26 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ + + +/* + Compute a size_t FNV-1a hash of the given data + This will use 32-bit or 64-bit hash depending on the size of size_t + + Parameters: + ----------- + buf - pointer to the data to be hashed + len - length of the data in bytes + + Returns: + ----------- + size_t hash value +*/ +size_t npy_fnv1a(const void *buf, size_t len); + +#endif // NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index d4766b5af7b4..e80c6c0cd45c 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4579,7 +4579,7 @@ static struct PyMethodDef array_module_methods[] = { {"from_dlpack", 
(PyCFunction)from_dlpack, METH_FASTCALL | METH_KEYWORDS, NULL}, {"_unique_hash", (PyCFunction)array__unique_hash, - METH_O, "Collect unique values via a hash map."}, + METH_FASTCALL | METH_KEYWORDS, "Collect unique values via a hash map."}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index f36acfdef49a..636f1ef0137c 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -1,13 +1,21 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define HASH_TABLE_INITIAL_BUCKETS 1024 #include -#include +#include +#include #include +#include #include #include "numpy/arrayobject.h" +#include "gil_utils.h" +extern "C" { + #include "fnv.h" + #include "npy_argparse.h" +} // This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. // Adapted from https://stackoverflow.com/a/25510879/2536294 @@ -18,77 +26,128 @@ struct FinalAction { private: F clean_; }; - template FinalAction finally(F f) { return FinalAction(f); } -template +template static PyObject* -unique(PyArrayObject *self) +unique_integer(PyArrayObject *self, npy_bool equal_nan) { - /* This function takes a numpy array and returns a numpy array containing - the unique values. - - It assumes the numpy array includes data that can be viewed as unsigned integers - of a certain size (sizeof(T)). - - It doesn't need to know the actual type, since it needs to find unique values - among binary representations of the input data. This means it won't apply to - custom or complicated dtypes or string values. + /* + * Returns a new NumPy array containing the unique values of the input array of integer. + * This function uses hashing to identify uniqueness efficiently. 
*/ NPY_ALLOW_C_API_DEF; - std::unordered_set hashset; - - NpyIter *iter = NpyIter_New(self, NPY_ITER_READONLY | - NPY_ITER_EXTERNAL_LOOP | - NPY_ITER_REFS_OK | - NPY_ITER_ZEROSIZE_OK | - NPY_ITER_GROWINNER, - NPY_KEEPORDER, NPY_NO_CASTING, - NULL); - // Making sure the iterator is deallocated when the function returns, with - // or w/o an exception - auto iter_dealloc = finally([&]() { NpyIter_Deallocate(iter); }); - if (iter == NULL) { - return NULL; + NPY_ALLOW_C_API; + PyArray_Descr *descr = PyArray_DESCR(self); + Py_INCREF(descr); + NPY_DISABLE_C_API; + + PyThreadState *_save1 = PyEval_SaveThread(); + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + + // Reserve hashset capacity in advance to minimize reallocations and collisions. + // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: + // - Reserving for all elements (isize) may over-allocate when there are few unique values. + // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). + // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 + std::unordered_set hashset(std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS)); + + // Input array is one-dimensional, enabling efficient iteration using strides. + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(*(T *)idata); } - NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { + npy_intp length = hashset.size(); + + PyEval_RestoreThread(_save1); + NPY_ALLOW_C_API; + PyObject *res_obj = PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + // This flag is needed to be able to call .sort on it. 
+ NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ); + + if (res_obj == NULL) { return NULL; } - char **dataptr = NpyIter_GetDataPtrArray(iter); - npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter); - npy_intp *innersizeptr = NpyIter_GetInnerLoopSizePtr(iter); - - // release the GIL - PyThreadState *_save; - _save = PyEval_SaveThread(); - // Making sure the GIL is re-acquired when the function returns, with - // or w/o an exception - auto grab_gil = finally([&]() { PyEval_RestoreThread(_save); }); - // first we put the data in a hash map - - if (NpyIter_GetIterSize(iter) > 0) { - do { - char* data = *dataptr; - npy_intp stride = *strideptr; - npy_intp count = *innersizeptr; - - while (count--) { - hashset.insert(*((T *) data)); - data += stride; - } - } while (iternext(iter)); + NPY_DISABLE_C_API; + PyThreadState *_save2 = PyEval_SaveThread(); + auto save2_dealloc = finally([&]() { + PyEval_RestoreThread(_save2); + }); + + char *odata = PyArray_BYTES((PyArrayObject *)res_obj); + npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; + // Output array is one-dimensional, enabling efficient iteration using strides. + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + *(T *)odata = *it; } - npy_intp length = hashset.size(); + return res_obj; +} +template +static PyObject* +unique_string(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input array of fixed size strings. + * This function uses hashing to identify uniqueness efficiently. 
+ */ + NPY_ALLOW_C_API_DEF; NPY_ALLOW_C_API; PyArray_Descr *descr = PyArray_DESCR(self); Py_INCREF(descr); + NPY_DISABLE_C_API; + + PyThreadState *_save1 = PyEval_SaveThread(); + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + + // variables for the string + npy_intp itemsize = descr->elsize; + npy_intp num_chars = itemsize / sizeof(T); + auto hash = [num_chars](const T *value) -> size_t { + return npy_fnv1a(value, num_chars * sizeof(T)); + }; + auto equal = [itemsize](const T *lhs, const T *rhs) -> bool { + return std::memcmp(lhs, rhs, itemsize) == 0; + }; + + // Reserve hashset capacity in advance to minimize reallocations and collisions. + // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: + // - Reserving for all elements (isize) may over-allocate when there are few unique values. + // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). + // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 + std::unordered_set hashset( + std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal + ); + + // Input array is one-dimensional, enabling efficient iteration using strides. 
+ char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert((T *)idata); + } + + npy_intp length = hashset.size(); + + PyEval_RestoreThread(_save1); + NPY_ALLOW_C_API; PyObject *res_obj = PyArray_NewFromDescr( &PyArray_Type, descr, @@ -100,18 +159,147 @@ unique(PyArrayObject *self) NPY_ARRAY_WRITEABLE, // flags NULL // obj ); + + if (res_obj == NULL) { + return NULL; + } NPY_DISABLE_C_API; + PyThreadState *_save2 = PyEval_SaveThread(); + auto save2_dealloc = finally([&]() { + PyEval_RestoreThread(_save2); + }); + + char *odata = PyArray_BYTES((PyArrayObject *)res_obj); + npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; + // Output array is one-dimensional, enabling efficient iteration using strides. + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + std::memcpy(odata, *it, itemsize); + } + + return res_obj; +} + +static PyObject* +unique_vstring(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input array. + * This function uses hashing to identify uniqueness efficiently. 
+ */
+    NPY_ALLOW_C_API_DEF;
+    NPY_ALLOW_C_API;
+    PyArray_Descr *descr = PyArray_DESCR(self);
+    Py_INCREF(descr);
+    NPY_DISABLE_C_API;
+
+    PyThreadState *_save1 = PyEval_SaveThread();
+
+    // number of elements in the input array
+    npy_intp isize = PyArray_SIZE(self);
+
+    // variables for the vstring
+    npy_string_allocator *in_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)descr);
+    auto hash = [equal_nan](const npy_static_string *value) -> size_t {
+        if (value->buf == NULL) {
+            if (equal_nan) {
+                return 0;
+            } else {
+                return std::hash<const npy_static_string *>{}(value);
+            }
+        }
+        return npy_fnv1a(value->buf, value->size * sizeof(char));
+    };
+    auto equal = [equal_nan](const npy_static_string *lhs, const npy_static_string *rhs) -> bool {
+        if (lhs->buf == NULL && rhs->buf == NULL) {
+            if (equal_nan) {
+                return true;
+            } else {
+                return lhs == rhs;
+            }
+        }
+        if (lhs->buf == NULL || rhs->buf == NULL) {
+            return false;
+        }
+        if (lhs->size != rhs->size) {
+            return false;
+        }
+        return std::memcmp(lhs->buf, rhs->buf, lhs->size) == 0;
+    };
+    // Reserve hashset capacity in advance to minimize reallocations and collisions.
+    // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count:
+    // - Reserving for all elements (isize) may over-allocate when there are few unique values.
+    // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers).
+    // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631
+    std::unordered_set<npy_static_string *, decltype(hash), decltype(equal)> hashset(
+        std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal
+    );
+
+    // Input array is one-dimensional, enabling efficient iteration using strides.
+    char *idata = PyArray_BYTES(self);
+    npy_intp istride = PyArray_STRIDES(self)[0];
+    // unpacked_strings need to be allocated outside of the loop because of the lifetime problem.
+    std::vector<npy_static_string> unpacked_strings(isize, {0, NULL});
+    for (npy_intp i = 0; i < isize; i++, idata += istride) {
+        npy_packed_static_string *packed_string = (npy_packed_static_string *)idata;
+        int is_null = NpyString_load(in_allocator, packed_string, &unpacked_strings[i]);
+        if (is_null == -1) {
+            npy_gil_error(PyExc_RuntimeError,
+                          "Failed to load string from packed static string. ");
+            return NULL;
+        }
+        hashset.insert(&unpacked_strings[i]);
+    }
+
+    NpyString_release_allocator(in_allocator);
+
+    npy_intp length = hashset.size();
+
+    PyEval_RestoreThread(_save1);
+    NPY_ALLOW_C_API;
+    PyObject *res_obj = PyArray_NewFromDescr(
+        &PyArray_Type,
+        descr,
+        1, // ndim
+        &length, // shape
+        NULL, // strides
+        NULL, // data
+        // This flag is needed to be able to call .sort on it.
+        NPY_ARRAY_WRITEABLE, // flags
+        NULL // obj
+    );

     if (res_obj == NULL) {
         return NULL;
     }
+    PyArray_Descr *res_descr = PyArray_DESCR((PyArrayObject *)res_obj);
+    Py_INCREF(res_descr);
+    NPY_DISABLE_C_API;
+
+    PyThreadState *_save2 = PyEval_SaveThread();
+    auto save2_dealloc = finally([&]() {
+        PyEval_RestoreThread(_save2);
+    });
+
+    npy_string_allocator *out_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)res_descr);
+    auto out_allocator_dealloc = finally([&]() {
+        NpyString_release_allocator(out_allocator);
+    });

-    // then we iterate through the map's keys to get the unique values
-    T* data = (T *)PyArray_DATA((PyArrayObject *)res_obj);
-    auto it = hashset.begin();
-    size_t i = 0;
-    for (; it != hashset.end(); it++, i++) {
-        data[i] = *it;
+    char *odata = PyArray_BYTES((PyArrayObject *)res_obj);
+    npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0];
+    // Output array is one-dimensional, enabling efficient iteration using strides.
+    for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) {
+        npy_packed_static_string *packed_string = (npy_packed_static_string *)odata;
+        int pack_status = 0;
+        if ((*it)->buf == NULL) {
+            pack_status = NpyString_pack_null(out_allocator, packed_string);
+        } else {
+            pack_status = NpyString_pack(out_allocator, packed_string, (*it)->buf, (*it)->size);
+        }
+        if (pack_status == -1) {
+            // string packing failed
+            return NULL;
+        }
     }

     return res_obj;
@@ -119,27 +307,30 @@ unique(PyArrayObject *self)


 // this map contains the functions used for each item size.
-typedef std::function<PyObject *(PyArrayObject *)> function_type;
+typedef std::function<PyObject *(PyArrayObject *, npy_bool)> function_type;
 std::unordered_map<int, function_type> unique_funcs = {
-    {NPY_BYTE, unique<npy_byte>},
-    {NPY_UBYTE, unique<npy_ubyte>},
-    {NPY_SHORT, unique<npy_short>},
-    {NPY_USHORT, unique<npy_ushort>},
-    {NPY_INT, unique<npy_int>},
-    {NPY_UINT, unique<npy_uint>},
-    {NPY_LONG, unique<npy_long>},
-    {NPY_ULONG, unique<npy_ulong>},
-    {NPY_LONGLONG, unique<npy_longlong>},
-    {NPY_ULONGLONG, unique<npy_ulonglong>},
-    {NPY_INT8, unique<npy_int8>},
-    {NPY_INT16, unique<npy_int16>},
-    {NPY_INT32, unique<npy_int32>},
-    {NPY_INT64, unique<npy_int64>},
-    {NPY_UINT8, unique<npy_uint8>},
-    {NPY_UINT16, unique<npy_uint16>},
-    {NPY_UINT32, unique<npy_uint32>},
-    {NPY_UINT64, unique<npy_uint64>},
-    {NPY_DATETIME, unique<npy_uint64>},
+    {NPY_BYTE, unique_integer<npy_byte>},
+    {NPY_UBYTE, unique_integer<npy_ubyte>},
+    {NPY_SHORT, unique_integer<npy_short>},
+    {NPY_USHORT, unique_integer<npy_ushort>},
+    {NPY_INT, unique_integer<npy_int>},
+    {NPY_UINT, unique_integer<npy_uint>},
+    {NPY_LONG, unique_integer<npy_long>},
+    {NPY_ULONG, unique_integer<npy_ulong>},
+    {NPY_LONGLONG, unique_integer<npy_longlong>},
+    {NPY_ULONGLONG, unique_integer<npy_ulonglong>},
+    {NPY_INT8, unique_integer<npy_int8>},
+    {NPY_INT16, unique_integer<npy_int16>},
+    {NPY_INT32, unique_integer<npy_int32>},
+    {NPY_INT64, unique_integer<npy_int64>},
+    {NPY_UINT8, unique_integer<npy_uint8>},
+    {NPY_UINT16, unique_integer<npy_uint16>},
+    {NPY_UINT32, unique_integer<npy_uint32>},
+    {NPY_UINT64, unique_integer<npy_uint64>},
+    {NPY_DATETIME, unique_integer<npy_uint64>},
+    {NPY_STRING, unique_string<npy_byte>},
+    {NPY_UNICODE, unique_string<npy_ucs4>},
+    {NPY_VSTRING, unique_vstring},
 };


@@ -154,14 +345,21 @@ std::unordered_map<int, function_type> unique_funcs = {
  * type is unsupported or `NULL` with an error set.
*/ extern "C" NPY_NO_EXPORT PyObject * -array__unique_hash(PyObject *NPY_UNUSED(module), PyObject *arr_obj) +array__unique_hash(PyObject *NPY_UNUSED(module), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - if (!PyArray_Check(arr_obj)) { - PyErr_SetString(PyExc_TypeError, - "_unique_hash() requires a NumPy array input."); + PyArrayObject *arr = NULL; + npy_bool equal_nan = NPY_TRUE; // default to True + + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("_unique_hash", args, len_args, kwnames, + "arr", &PyArray_Converter, &arr, + "|equal_nan", &PyArray_BoolConverter, &equal_nan, + NULL, NULL, NULL + ) < 0 + ) { return NULL; } - PyArrayObject *arr = (PyArrayObject *)arr_obj; try { auto type = PyArray_TYPE(arr); @@ -170,7 +368,7 @@ array__unique_hash(PyObject *NPY_UNUSED(module), PyObject *arr_obj) Py_RETURN_NOTIMPLEMENTED; } - return unique_funcs[type](arr); + return unique_funcs[type](arr, equal_nan); } catch (const std::bad_alloc &e) { PyErr_NoMemory(); diff --git a/numpy/_core/src/multiarray/unique.h b/numpy/_core/src/multiarray/unique.h index 3e258405e8f4..7b3fb143ada4 100644 --- a/numpy/_core/src/multiarray/unique.h +++ b/numpy/_core/src/multiarray/unique.h @@ -5,7 +5,8 @@ extern "C" { #endif -PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), PyObject *args); +PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); #ifdef __cplusplus } diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index ef0739ba486f..c4788385b924 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -368,7 +368,8 @@ def _unique1d(ar, return_index=False, return_inverse=False, conv = _array_converter(ar) ar_, = conv - if (hash_unique := _unique_hash(ar_)) is not NotImplemented: + if (hash_unique := _unique_hash(ar_, equal_nan=equal_nan)) \ + is not NotImplemented: if sorted: hash_unique.sort() # We wrap the result back in case it was a 
subclass of numpy.ndarray. diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 7865e1b16ee9..b3e2bfa279b0 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -5,6 +5,7 @@ import numpy as np from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique +from numpy.dtypes import StringDType from numpy.exceptions import AxisError from numpy.testing import ( assert_array_equal, @@ -813,7 +814,9 @@ def test_unique_1d(self): def test_unique_zero_sized(self): # test for zero-sized arrays - for dt in self.get_types(): + types = self.get_types() + types.extend('SU') + for dt in types: a = np.array([], dt) b = np.array([], dt) i1 = np.array([], np.int64) @@ -838,6 +841,187 @@ class Subclass(np.ndarray): bb = Subclass(b.shape, dtype=dt, buffer=b) self.check_all(aa, bb, i1, i2, c, dt) + def test_unique_byte_string_hash_based(self): + # test for byte string arrays + arr = ['apple', 'banana', 'apple', 'cherry', 'date', 'banana', 'fig', 'grape'] + unq_sorted = ['apple', 'banana', 'cherry', 'date', 'fig', 'grape'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_unicode_string_hash_based(self): + # test for unicode string arrays + arr = [ + 'café', 'cafe', 'café', 'naïve', 'naive', + 'résumé', 'naïve', 'resume', 'résumé', + ] + unq_sorted = ['cafe', 'café', 'naive', 'naïve', 'resume', 'résumé'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_vstring_hash_based_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=True) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 
'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=True) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 1) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_hash_based_not_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=False) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 
 should have exactly one None
+        count_none = sum(x is None for x in a1)
+        assert_equal(count_none, 6)
+
+        a1_wo_none = sorted(x for x in a1 if x is not None)
+        assert_array_equal(a1_wo_none, unq_sorted_wo_none)
+
+    def test_unique_vstring_errors(self):
+        a = np.array(
+            [
+                'apple', 'banana', 'apple', None, 'cherry',
+                'date', 'banana', 'fig', None, 'grape',
+            ] * 2,
+            dtype=StringDType(na_object=None)
+        )
+        assert_raises(ValueError, unique, a, equal_nan=False)
+
     @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"])
     def test_unsupported_hash_based(self, arg):
         """These currently never use the hash-based solution. However,

From 7c91551b85650c5465d21956acf66a47d9d0d02b Mon Sep 17 00:00:00 2001
From: specsy
Date: Fri, 20 Jun 2025 20:29:10 +0530
Subject: [PATCH 140/294] DOC: Update CONTRIBUTING.rst (#28158)

* Update CONTRIBUTING.rst

fixes #19778
Updating the contribution section so that contributors avoid making mistakes while asking questions; instead they focus on contributing and working on the project right after.

* Update CONTRIBUTING.rst

Shortened the length of the sentence.

* Update CONTRIBUTING.rst

---
 CONTRIBUTING.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 6e019983a0a2..0919790c65d1 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -7,8 +7,9 @@
 Whether you're new to open source or experienced, your contributions
 help us grow.
 
 Pull requests (PRs) are always welcome, but making a PR is just the
-start. Please respond to comments and requests for changes to help
-move the process forward. Please follow our
+start. Please respond to comments and requests for changes to help move the process forward.
+Skip asking for an issue to be assigned to you on GitHub—send in your PR, explain what you did and ask for a review. It makes collaboration and support much easier.
+Please follow our `Code of Conduct `__, which applies to all interactions, including issues and PRs. From e3e875ab478a855d2c4198df652f1dc34fc09f17 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:28:38 +0200 Subject: [PATCH 141/294] MAIN: Enable linting with E501 --- numpy/_core/tests/test_api.py | 9 ++++--- numpy/_core/tests/test_shape_base.py | 9 ++++--- numpy/lib/tests/test_function_base.py | 36 ++++++++++++++++----------- ruff.toml | 4 +-- 4 files changed, 33 insertions(+), 25 deletions(-) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index bb21d79c472d..da4a8f423bc5 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -591,11 +591,12 @@ def check_contig(a, ccontig, fcontig): def test_broadcast_arrays(): # Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + dtype = 'u4,u4,u4' + a = np.array([(1, 2, 3)], dtype=dtype) + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype) result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype=dtype)) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype)) @pytest.mark.parametrize(["shape", "fill_value", "expected_output"], [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 8de24278fc5d..9a6febaf51f0 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -294,13 +294,15 @@ def test_exceptions(self): assert_raises(ValueError, concatenate, ()) @pytest.mark.slow - @pytest.mark.skipif(sys.maxsize < 2**32, reason="only problematic on 64bit platforms") + 
@pytest.mark.skipif(sys.maxsize < 2**32, + reason="only problematic on 64bit platforms") @requires_memory(2 * np.iinfo(np.intc).max) def test_huge_list_error(self): a = np.array([1]) max_int = np.iinfo(np.intc).max arrs = (a,) * (max_int + 1) - msg = fr"concatenate\(\) only supports up to {max_int} arrays but got {max_int + 1}." + msg = (fr"concatenate\(\) only supports up to {max_int} arrays" + f" but got {max_int + 1}.") with pytest.raises(ValueError, match=msg): np.concatenate(arrs) @@ -379,7 +381,8 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") + @pytest.mark.skipif(IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython") def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index f2dba193c849..8f79e3d88b8c 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -65,6 +65,7 @@ suppress_warnings, ) +np_floats = [np.half, np.single, np.double, np.longdouble] def get_mat(n): data = np.arange(n) @@ -309,7 +310,7 @@ def test_basic(self): def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: - # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 + # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa: E501 a = np.array([[1, 2], [3, 4]]) assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) @@ -2520,7 +2521,7 @@ def test_extreme(self): assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) assert_(np.all(np.abs(c) <= 1.0)) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_corrcoef_dtype(self, test_type): cast_A = 
self.A.astype(test_type) res = corrcoef(cast_A, dtype=test_type) @@ -2626,7 +2627,7 @@ def test_unit_fweights_and_aweights(self): aweights=self.unit_weights), self.res1) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_cov_dtype(self, test_type): cast_x1 = self.x1.astype(test_type) res = cov(cast_x1, dtype=test_type) @@ -2648,7 +2649,8 @@ def test_simple(self): # need at least one test above 8, as the implementation is piecewise A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) - expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, + 1.00006049, 2815.71662847]) assert_almost_equal(i0(A), expected) assert_almost_equal(i0(-A), expected) @@ -3141,23 +3143,27 @@ def test_non_finite_any_nan(self, sc): def test_non_finite_inf(self, sc): """ Test that interp between opposite infs gives nan """ - assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) # unless the y values are equal assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) def test_non_finite_half_inf_xf(self, sc): """ Test that interp where both axes have a bound at inf gives nan """ - assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) - 
assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) def test_non_finite_half_inf_x(self, sc): """ Test interp where the x axis has a bound at inf """ diff --git a/ruff.toml b/ruff.toml index 7454c6c05e5b..70bef8fb095d 100644 --- a/ruff.toml +++ b/ruff.toml @@ -73,7 +73,6 @@ ignore = [ "test*.py" = ["B015", "B018", "E201", "E714"] "benchmarks/benchmarks/bench_linalg.py" = ["E501"] -"numpy/_core/tests/test_api.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] "numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] @@ -88,13 +87,12 @@ ignore = [ "numpy/_core/tests/test_umath*py" = ["E501"] "numpy/_core/tests/test_numeric*.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_shape_base.py" = ["E501"] "numpy/_core/tests/test_simd*.py" = ["E501"] "numpy/_core/tests/test_strings.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/lib/tests/test_function_base.py" = ["E501"] +#"numpy/lib/tests/test_function_base.py" = ["E501"] 
"numpy/lib/tests/test_format.py" = ["E501"] "numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] From dea4451c945f878f20402ebf2bc8261b9c000e59 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:30:38 +0200 Subject: [PATCH 142/294] MAIN: Enable linting with E501 --- numpy/lib/tests/test_io.py | 9 ++++++--- ruff.toml | 3 +-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 79fca0dd690b..48f579d7ddc7 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -645,7 +645,8 @@ def check_large_zip(memoryerror_raised): raise MemoryError("Child process raised a MemoryError exception") # -9 indicates a SIGKILL, probably an OOM. if p.exitcode == -9: - pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + msg = "subprocess got a SIGKILL, apparently free memory was not sufficient" + pytest.xfail(msg) assert p.exitcode == 0 class LoadTxtBase: @@ -1673,7 +1674,8 @@ def test_dtype_with_converters_and_usecols(self): conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', names=None, converters=conv, encoding="bytes") - control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp) + control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], + dtype=dtyp) assert_equal(test, control) dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', @@ -1886,7 +1888,8 @@ def test_user_missing_values(self): # basekwargs['dtype'] = mdtype test = np.genfromtxt(TextIO(data), - missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + missing_values={0: -9, 1: -99, 2: -999j}, + usemask=True, **basekwargs) control = ma.array([(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)], mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], diff --git a/ruff.toml 
b/ruff.toml index 70bef8fb095d..75a2f54a6734 100644 --- a/ruff.toml +++ b/ruff.toml @@ -92,9 +92,8 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -#"numpy/lib/tests/test_function_base.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] -"numpy/lib/tests/test_io.py" = ["E501"] +#"numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] From 09eb6b78d6fbe017702d66c08057a5304dd38c9f Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:33:43 +0200 Subject: [PATCH 143/294] MAIN: Enable linting with E501 --- numpy/lib/tests/test_polynomial.py | 15 ++++++++++----- ruff.toml | 2 -- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index c173ac321d74..65ad5c854639 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -61,7 +61,8 @@ def test_poly1d_math(self): assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) assert_equal(p + q, np.poly1d([4., 4., 4.])) assert_equal(p - q, np.poly1d([-2., 0., 2.])) - assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., + 312., 324., 216., 81.])) assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) assert_equal(p.deriv(), np.poly1d([2., 2.])) @@ -131,12 +132,16 @@ def test_roots(self): for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1, i]) res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + # Adapting the expected precision 
according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1.01, i]) res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) @@ -249,8 +254,8 @@ def test_complex(self): def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) - assert_( - (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6]).all()) + expected = [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6] + assert_( (p2.coeffs == expected).all()) def test_zero_dims(self): try: diff --git a/ruff.toml b/ruff.toml index 75a2f54a6734..a50900937073 100644 --- a/ruff.toml +++ b/ruff.toml @@ -93,8 +93,6 @@ ignore = [ "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] -#"numpy/lib/tests/test_io.py" = ["E501"] -"numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] "numpy/f2py/*py" = ["E501"] From 1b3a0d9385aa1ded43885d966bb8733da839762f Mon Sep 17 00:00:00 2001 From: Yuki Kobayashi Date: Sat, 21 Jun 2025 00:32:00 +0000 Subject: [PATCH 144/294] DOC: Fix some markup errors --- doc/source/f2py/buildtools/distutils-to-meson.rst | 2 +- doc/source/reference/random/multithreading.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst 
index bf5da973e9fa..b24638e62239 100644
--- a/doc/source/f2py/buildtools/distutils-to-meson.rst
+++ b/doc/source/f2py/buildtools/distutils-to-meson.rst
@@ -117,7 +117,7 @@ sample is included below.
    +------------------------------------+-------------------------------+
    | LDFLAGS                            | Linker options                |
    +------------------------------------+-------------------------------+
-   | LD\ :sub:`LIBRARY`\ \ :sub:`PATH`\ | Library file locations (Unix) |
+   | LD\_LIBRARY\_PATH                  | Library file locations (Unix) |
    +------------------------------------+-------------------------------+
    | LIBS                               | Libraries to link against     |
    +------------------------------------+-------------------------------+
diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst
index 17c6a515cdbc..73d2fc9ee5ad 100644
--- a/doc/source/reference/random/multithreading.rst
+++ b/doc/source/reference/random/multithreading.rst
@@ -9,7 +9,7 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created
 using the common constructors such as :meth:`numpy.empty` will satisfy these
 requirements.
 
-This example makes use of:mod:`concurrent.futures` to fill an array using
+This example makes use of :mod:`concurrent.futures` to fill an array using
 multiple threads. Threads are long-lived so that repeated calls do not require
 any additional overheads from thread creation.
 

From 124ac8d41f2d913cbac1a05452cdae3d80d7e789 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sat, 21 Jun 2025 07:40:15 -0600
Subject: [PATCH 145/294] MAINT: Update main after 2.3.1 release.
- Add 2.3.1-notes.rst
- Add 2.3.1-changelog.rst
- Update release.rst

[skip cirrus] [skip azp] [skip actions]
---
 doc/changelog/2.3.1-changelog.rst  | 34 +++++++++++++++++++
 doc/source/release.rst             |  1 +
 doc/source/release/2.3.1-notes.rst | 53 ++++++++++++++++++++++++++++++
 3 files changed, 88 insertions(+)
 create mode 100644 doc/changelog/2.3.1-changelog.rst
 create mode 100644 doc/source/release/2.3.1-notes.rst

diff --git a/doc/changelog/2.3.1-changelog.rst b/doc/changelog/2.3.1-changelog.rst
new file mode 100644
index 000000000000..a1c840f8beda
--- /dev/null
+++ b/doc/changelog/2.3.1-changelog.rst
@@ -0,0 +1,34 @@
+
+Contributors
+============
+
+A total of 9 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Brad Smith +
+* Charles Harris
+* Developer-Ecosystem-Engineering
+* François Rozet
+* Joren Hammudoglu
+* Matti Picus
+* Mugundan Selvanayagam
+* Nathan Goldbaum
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 12 pull requests were merged for this release.
+
+* `#29140 `__: MAINT: Prepare 2.3.x for further development
+* `#29191 `__: BUG: fix matmul with transposed out arg (#29179)
+* `#29192 `__: TYP: Backport typing fixes and improvements.
+* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196)
+* `#29222 `__: TYP: Backport typing fixes
+* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation...
+* `#29234 `__: TST: Fix test that uses uninitialized memory (#29232)
+* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223)
+* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231)
+* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64
+* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64
+* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64.
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6c6a853b06f5..59e6dd07b002 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.1 2.3.0 2.2.6 2.2.5 diff --git a/doc/source/release/2.3.1-notes.rst b/doc/source/release/2.3.1-notes.rst new file mode 100644 index 000000000000..d8193f07671c --- /dev/null +++ b/doc/source/release/2.3.1-notes.rst @@ -0,0 +1,53 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.1 Release Notes +========================= + +The NumPy 2.3.1 release is a patch release with several bug fixes, annotation +improvements, and better support for OpenBSD. Highlights are: + +- Fix bug in ``matmul`` for non-contiguous out kwarg parameter +- Fix for Accelerate runtime warnings on M4 hardware +- Fix new in NumPy 2.3.0 ``np.vectorize`` casting errors +- Improved support of cpu features for FreeBSD and OpenBSD + +This release supports Python versions 3.11-3.13, Python 3.14 will be supported +when it is released. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Brad Smith + +* Charles Harris +* Developer-Ecosystem-Engineering +* François Rozet +* Joren Hammudoglu +* Matti Picus +* Mugundan Selvanayagam +* Nathan Goldbaum +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#29140 `__: MAINT: Prepare 2.3.x for further development +* `#29191 `__: BUG: fix matmul with transposed out arg (#29179) +* `#29192 `__: TYP: Backport typing fixes and improvements. +* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196) +* `#29222 `__: TYP: Backport typing fixes +* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation... 
+* `#29234 `__: TST: Fix test that uses uninitialized memory (#29232) +* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223) +* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231) +* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. + From 1fefc5c6b767e86c2642641e7ba7a22ab0cf554b Mon Sep 17 00:00:00 2001 From: Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> Date: Mon, 23 Jun 2025 23:26:14 +0530 Subject: [PATCH 146/294] DOC: Clarify dtype argument for __array__ in custom container guide (#29254) * DOC: Clarify dtype argument for __array__ in custom container guide --- doc/source/user/basics.dispatch.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index ae53995a3917..117d60f85467 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -46,6 +46,21 @@ array([[1., 0., 0., 0., 0.], [0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]) +The ``__array__`` method can optionally accept a `dtype` argument. If provided, +this argument specifies the desired data type for the resulting NumPy array. +Your implementation should attempt to convert the data to this `dtype` +if possible. If the conversion is not supported, it's generally best +to fall back to a default type or raise a `TypeError` or `ValueError`. + +Here's an example demonstrating its use with `dtype` specification: + +>>> np.asarray(arr, dtype=np.float32) +array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]], dtype=float32) + If we operate on ``arr`` with a numpy function, numpy will again use the ``__array__`` interface to convert it to an array and then apply the function in the usual way. 
From 32b43214b4622c336258899744a9d5692c30c689 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Mon, 23 Jun 2025 22:23:50 +0200 Subject: [PATCH 147/294] Apply suggestions from code review Co-authored-by: Joren Hammudoglu --- numpy/_core/tests/test_shape_base.py | 12 ++++++++---- numpy/lib/tests/test_function_base.py | 24 ++++++++++++------------ numpy/lib/tests/test_polynomial.py | 4 ++-- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 9a6febaf51f0..1b9728e5c006 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -294,8 +294,10 @@ def test_exceptions(self): assert_raises(ValueError, concatenate, ()) @pytest.mark.slow - @pytest.mark.skipif(sys.maxsize < 2**32, - reason="only problematic on 64bit platforms") + @pytest.mark.skipif( + sys.maxsize < 2**32, + reason="only problematic on 64bit platforms" + ) @requires_memory(2 * np.iinfo(np.intc).max) def test_huge_list_error(self): a = np.array([1]) @@ -381,8 +383,10 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, - reason="PYPY handles sq_concat, nb_add differently than cpython") + @pytest.mark.skipif( + IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython" + ) def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 8f79e3d88b8c..eccf4bcfb019 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -310,7 +310,7 @@ def test_basic(self): def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: - # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa: E501 + # github.com/scikit-learn/scikit-learn/commit/7842748 a = np.array([[1, 2], [3, 4]]) 
assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) @@ -3145,9 +3145,9 @@ def test_non_finite_inf(self, sc): """ Test that interp between opposite infs gives nan """ inf = np.inf nan = np.nan - assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) # unless the y values are equal assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) @@ -3156,14 +3156,14 @@ def test_non_finite_half_inf_xf(self, sc): """ Test that interp where both axes have a bound at inf gives nan """ inf = np.inf nan = np.nan - assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) def test_non_finite_half_inf_x(self, sc): 
""" Test interp where the x axis has a bound at inf """ diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 65ad5c854639..32547f8e6c18 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -254,8 +254,8 @@ def test_complex(self): def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) - expected = [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6] - assert_( (p2.coeffs == expected).all()) + expected = [1 / 4 / 5, 1 / 3 / 4, 1 / 2 / 3, 9 / 1 / 2, 7, 6] + assert_((p2.coeffs == expected).all()) def test_zero_dims(self): try: From 2d6a4d76062e20e6220401a1609345d433bd6c8b Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 25 Jun 2025 14:35:57 +0100 Subject: [PATCH 148/294] TYP: Type ``MaskedArray.__{mul,rmul}__`` (#29265) --- numpy/__init__.pyi | 2 + numpy/ma/core.pyi | 86 +++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 97 +++++++++++++++++++++++++++ 3 files changed, 183 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2e9dea06ce6a..9fae513bdf39 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3007,6 +3007,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__mul__` @overload def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3048,6 +3049,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index de6db7873faa..6c0de39594dd 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -658,8 +658,90 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __mul__(self, other): ... - def __rmul__(self, other): ... + # Keep in sync with `ndarray.__mul__` + @overload + def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
# type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __mul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rmul__` + @overload # signature equivalent to __mul__ + def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... 
+ @overload + def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rmul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __truediv__(self, other): ... def __rtruediv__(self, other): ... def __floordiv__(self, other): ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 97f833b6a488..60ee04218489 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -623,3 +623,100 @@ assert_type(AR_LIKE_c - MAR_o, Any) assert_type(AR_LIKE_td64 - MAR_o, Any) assert_type(AR_LIKE_dt64 - MAR_o, Any) assert_type(AR_LIKE_o - MAR_o, Any) + +# Masked Array multiplication + +assert_type(MAR_b * AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_u * MAR_b, MaskedArray[np.uint32]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_b, MaskedArray[np.signedinteger]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_b, MaskedArray[np.floating]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_b, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_b, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_b, Any) # type: ignore[assert-type] + +assert_type(MAR_u4 * AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 * AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 * AR_LIKE_o, Any) + +assert_type(MAR_i8 * AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 * AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_f, 
MaskedArray[np.floating]) +assert_type(MAR_i8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 * AR_LIKE_o, Any) + +assert_type(MAR_f8 * AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_f8, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_f8, Any) # type: ignore[assert-type] + +assert_type(MAR_c16 * AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_c16, 
Any) # type: ignore[assert-type] + +assert_type(MAR_td64 * AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_td64, MaskedArray[np.datetime64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_td64, Any) # type: ignore[assert-type] + +assert_type(AR_LIKE_o * MAR_dt64, Any) # type: ignore[assert-type] + +assert_type(MAR_o * AR_LIKE_b, Any) +assert_type(MAR_o * AR_LIKE_u, Any) +assert_type(MAR_o * AR_LIKE_i, Any) +assert_type(MAR_o * AR_LIKE_f, Any) +assert_type(MAR_o * AR_LIKE_c, Any) +assert_type(MAR_o * AR_LIKE_td64, Any) +assert_type(MAR_o * AR_LIKE_dt64, Any) +assert_type(MAR_o * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] From f8281b92e527564c390f834e3d467651026b27b9 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 25 Jun 2025 11:39:05 -0600 Subject: [PATCH 149/294] BUG: fix 
fencepost error in StringDType internals (#29269) This makes all the comparisons with NPY_MEDIUM_STRING_MAX_SIZE use <= consistently: --- .../src/multiarray/stringdtype/static_string.c | 2 +- numpy/_core/tests/test_stringdtype.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 1c29bbb67f7e..89b53bcb8538 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -478,7 +478,7 @@ heap_or_arena_allocate(npy_string_allocator *allocator, if (*flags == 0) { // string isn't previously allocated, so add to existing arena allocation char *ret = arena_malloc(arena, allocator->realloc, sizeof(char) * size); - if (size < NPY_MEDIUM_STRING_MAX_SIZE) { + if (size <= NPY_MEDIUM_STRING_MAX_SIZE) { *flags = NPY_STRING_INITIALIZED; } else { diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 9bab810d4421..1c2b606dedf3 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1193,6 +1193,24 @@ def test_growing_strings(dtype): assert_array_equal(arr, uarr) +def test_assign_medium_strings(): + # see gh-29261 + N = 9 + src = np.array( + ( + ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255] + + ['0' * 256] * 2 + ['0' * 255] + ), dtype='T') + dst = np.array( + ( + ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256] + + ['0' * 255] + [''] * 5 + ), dtype='T') + + dst[1:N + 1] = src + assert_array_equal(dst[1:N + 1], src) + + UFUNC_TEST_DATA = [ "hello" * 10, "Ae¢☃€ 😊" * 20, From b89320a514ed121f8b21959307fc778165e53323 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 25 Jun 2025 20:03:33 +0200 Subject: [PATCH 150/294] STY: ruff/isort config tweaks - episode 2 (#29185) * DEV: enable ruff/isort ``combine-as-imports`` and ``split-on-trailing-comma`` * STY: run 
``ruff check --fix`` to fix the new ``I001`` errors --- numpy/__config__.pyi | 10 +- numpy/__init__.py | 3 +- numpy/_core/_add_newdocs_scalars.py | 3 +- numpy/_core/_asarray.py | 6 +- numpy/_core/_dtype.pyi | 3 +- numpy/_core/_methods.py | 4 +- numpy/_core/_type_aliases.pyi | 3 +- numpy/_core/defchararray.py | 14 +- numpy/_core/defchararray.pyi | 31 ++- numpy/_core/fromnumeric.py | 5 +- numpy/_core/function_base.pyi | 3 +- numpy/_core/getlimits.py | 3 +- numpy/_core/multiarray.pyi | 8 +- numpy/_core/numeric.py | 3 +- numpy/_core/numeric.pyi | 20 +- numpy/_core/numerictypes.pyi | 3 +- numpy/_core/records.py | 3 +- numpy/_core/shape_base.py | 4 +- numpy/_core/strings.py | 28 +- numpy/_core/strings.pyi | 16 +- numpy/_core/tests/test_argparse.py | 2 - numpy/_core/tests/test_custom_dtypes.py | 2 +- numpy/_core/tests/test_indexerrors.py | 5 +- numpy/_core/tests/test_numeric.py | 3 +- numpy/_core/tests/test_scalar_ctors.py | 6 +- numpy/_core/tests/test_stringdtype.py | 3 +- numpy/_core/tests/test_umath.py | 3 +- numpy/_typing/__init__.py | 262 +++++++++--------- numpy/_typing/_callable.pyi | 6 +- numpy/_typing/_dtype_like.py | 9 +- numpy/_utils/__init__.pyi | 3 +- numpy/_utils/_pep440.pyi | 4 +- numpy/ctypeslib/__init__.pyi | 18 -- numpy/ctypeslib/_ctypeslib.pyi | 10 +- numpy/dtypes.pyi | 2 +- numpy/f2py/__init__.pyi | 3 +- numpy/f2py/_backends/_meson.pyi | 3 +- numpy/f2py/auxfuncs.pyi | 3 +- numpy/f2py/crackfortran.pyi | 13 +- numpy/f2py/f2py2e.pyi | 3 +- numpy/f2py/rules.pyi | 3 +- numpy/f2py/symbolic.pyi | 3 +- numpy/f2py/tests/test_kind.py | 2 - numpy/fft/__init__.pyi | 7 +- numpy/fft/_helper.pyi | 3 +- numpy/fft/_pocketfft.pyi | 3 +- numpy/fft/helper.pyi | 3 +- numpy/lib/__init__.pyi | 52 ++-- numpy/lib/_arraypad_impl.pyi | 11 +- numpy/lib/_arraysetops_impl.pyi | 11 +- numpy/lib/_function_base_impl.py | 11 +- numpy/lib/_function_base_impl.pyi | 2 +- numpy/lib/_histograms_impl.pyi | 14 +- numpy/lib/_index_tricks_impl.pyi | 13 +- numpy/lib/_nanfunctions_impl.pyi | 6 +- 
numpy/lib/_npyio_impl.pyi | 2 +- numpy/lib/_polynomial_impl.pyi | 4 +- numpy/lib/_twodim_base_impl.pyi | 10 +- numpy/lib/_type_check_impl.pyi | 3 +- numpy/lib/array_utils.pyi | 6 - numpy/lib/format.pyi | 42 --- numpy/lib/introspect.py | 3 +- numpy/lib/mixins.pyi | 3 +- numpy/lib/npyio.pyi | 4 - numpy/lib/scimath.pyi | 18 -- numpy/lib/stride_tricks.pyi | 2 - numpy/lib/tests/test__iotools.py | 7 +- numpy/linalg/__init__.pyi | 4 +- numpy/linalg/_linalg.py | 30 +- numpy/linalg/_linalg.pyi | 2 +- numpy/linalg/_umath_linalg.pyi | 3 +- numpy/ma/core.py | 2 +- numpy/ma/extras.py | 3 +- numpy/ma/tests/test_mrecords.py | 14 +- numpy/ma/tests/test_old_ma.py | 6 +- numpy/matrixlib/tests/test_matrix_linalg.py | 2 +- numpy/polynomial/chebyshev.pyi | 3 +- numpy/polynomial/hermite.pyi | 3 +- numpy/polynomial/hermite_e.pyi | 3 +- numpy/polynomial/laguerre.pyi | 3 +- numpy/polynomial/legendre.pyi | 3 +- numpy/polynomial/polynomial.pyi | 3 +- numpy/polynomial/polyutils.pyi | 9 +- numpy/polynomial/tests/test_chebyshev.py | 7 +- numpy/polynomial/tests/test_classes.py | 7 +- numpy/polynomial/tests/test_hermite.py | 7 +- numpy/polynomial/tests/test_hermite_e.py | 7 +- numpy/polynomial/tests/test_laguerre.py | 7 +- numpy/polynomial/tests/test_legendre.py | 7 +- numpy/polynomial/tests/test_polyutils.py | 7 +- .../tests/test_randomstate_regression.py | 6 +- numpy/random/tests/test_regression.py | 6 +- numpy/testing/_private/utils.pyi | 2 +- numpy/tests/test_reloading.py | 8 +- numpy/tests/test_scripts.py | 3 +- ruff.toml | 2 + 96 files changed, 352 insertions(+), 601 deletions(-) diff --git a/numpy/__config__.pyi b/numpy/__config__.pyi index b59bdcd252b6..21e8b01fdd96 100644 --- a/numpy/__config__.pyi +++ b/numpy/__config__.pyi @@ -1,7 +1,13 @@ from enum import Enum from types import ModuleType -from typing import Final, NotRequired, TypedDict, overload, type_check_only -from typing import Literal as L +from typing import ( + Final, + Literal as L, + NotRequired, + TypedDict, + overload, + 
type_check_only, +) _CompilerConfigDictValue = TypedDict( "_CompilerConfigDictValue", diff --git a/numpy/__init__.py b/numpy/__init__.py index aadc1fab3407..d6702bba4622 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -454,8 +454,7 @@ pass del ta - from . import lib - from . import matrixlib as _mat + from . import lib, matrixlib as _mat from .lib import scimath as emath from .lib._arraypad_impl import pad from .lib._arraysetops_impl import ( diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 96170d80c7c9..9d4cb48825e0 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -6,8 +6,7 @@ import os import sys -from numpy._core import dtype -from numpy._core import numerictypes as _numerictypes +from numpy._core import dtype, numerictypes as _numerictypes from numpy._core.function_base import add_newdoc ############################################################################## diff --git a/numpy/_core/_asarray.py b/numpy/_core/_asarray.py index 613c5cf57060..edaff5222f69 100644 --- a/numpy/_core/_asarray.py +++ b/numpy/_core/_asarray.py @@ -4,11 +4,7 @@ `require` fits this category despite its name not matching this pattern. 
""" from .multiarray import array, asanyarray -from .overrides import ( - array_function_dispatch, - finalize_array_function_like, - set_module, -) +from .overrides import array_function_dispatch, finalize_array_function_like, set_module __all__ = ["require"] diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index adb38583783f..28adecf4ad2f 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,5 +1,4 @@ -from typing import Final, TypeAlias, TypedDict, overload, type_check_only -from typing import Literal as L +from typing import Final, Literal as L, TypeAlias, TypedDict, overload, type_check_only from typing_extensions import ReadOnly, TypeVar import numpy as np diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index 21ad7900016b..a3d5b02a2a14 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -9,9 +9,7 @@ from contextlib import nullcontext import numpy as np -from numpy._core import multiarray as mu -from numpy._core import numerictypes as nt -from numpy._core import umath as um +from numpy._core import multiarray as mu, numerictypes as nt, umath as um from numpy._core.multiarray import asanyarray from numpy._globals import _NoValue diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index 3c9dac7a1202..3d57e8135378 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,6 +1,5 @@ from collections.abc import Collection -from typing import Final, TypeAlias, TypedDict, type_check_only -from typing import Literal as L +from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only import numpy as np diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index bde8921f5504..0378e976254d 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -22,31 +22,19 @@ from numpy._core.multiarray import compare_chararrays from numpy._core.strings import ( _join as join, -) -from numpy._core.strings import ( _rsplit 
as rsplit, -) -from numpy._core.strings import ( _split as split, -) -from numpy._core.strings import ( _splitlines as splitlines, ) from numpy._utils import set_module from numpy.strings import * from numpy.strings import ( multiply as strings_multiply, -) -from numpy.strings import ( partition as strings_partition, -) -from numpy.strings import ( rpartition as strings_rpartition, ) -from .numeric import array as narray -from .numeric import asarray as asnarray -from .numeric import ndarray +from .numeric import array as narray, asarray as asnarray, ndarray from .numerictypes import bytes_, character, str_ __all__ = [ diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 26a5af432824..25754965c46a 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,5 +1,12 @@ -from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload -from typing import Literal as L +from typing import ( + Any, + Literal as L, + Self, + SupportsIndex, + SupportsInt, + TypeAlias, + overload, +) from typing_extensions import TypeVar import numpy as np @@ -14,13 +21,19 @@ from numpy import ( str_, ) from numpy._core.multiarray import compare_chararrays -from numpy._typing import NDArray, _AnyShape, _Shape, _ShapeLike, _SupportsArray -from numpy._typing import _ArrayLikeAnyString_co as UST_co -from numpy._typing import _ArrayLikeBool_co as b_co -from numpy._typing import _ArrayLikeBytes_co as S_co -from numpy._typing import _ArrayLikeInt_co as i_co -from numpy._typing import _ArrayLikeStr_co as U_co -from numpy._typing import _ArrayLikeString_co as T_co +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBool_co as b_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _Shape, + _ShapeLike, + _SupportsArray, +) __all__ = [ "equal", diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 
e20d774d014d..34fe1798f45e 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -8,10 +8,7 @@ import numpy as np from numpy._utils import set_module -from . import _methods, overrides -from . import multiarray as mu -from . import numerictypes as nt -from . import umath as um +from . import _methods, multiarray as mu, numerictypes as nt, overrides, umath as um from ._multiarray_umath import _array_converter from .multiarray import asanyarray, asarray, concatenate diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 600265b1fd0a..19e1238c4e15 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,6 +1,5 @@ from _typeshed import Incomplete -from typing import Literal as L -from typing import SupportsIndex, TypeAlias, TypeVar, overload +from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload import numpy as np from numpy._typing import ( diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index afa2ccebcfd2..a3d0086974b1 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -8,8 +8,7 @@ from numpy._utils import set_module -from . import numeric -from . import numerictypes as ntypes +from . 
import numeric, numerictypes as ntypes from ._machar import MachAr from .numeric import array, inf, nan from .umath import exp2, isnan, log10, nextafter diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 91ff666688bb..aa9e796da10b 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -6,6 +6,7 @@ from typing import ( Any, ClassVar, Final, + Literal as L, Protocol, SupportsIndex, TypeAlias, @@ -16,9 +17,6 @@ from typing import ( overload, type_check_only, ) -from typing import ( - Literal as L, -) from typing_extensions import CapsuleType import numpy as np @@ -41,6 +39,7 @@ from numpy import ( # type: ignore[attr-defined] count_nonzero, datetime64, dtype, + einsum as c_einsum, flatiter, float64, floating, @@ -61,9 +60,6 @@ from numpy import ( # type: ignore[attr-defined] unsignedinteger, vecdot, ) -from numpy import ( - einsum as c_einsum, -) from numpy._typing import ( ArrayLike, # DTypes diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 964447fa0d8a..4886468f0864 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -10,8 +10,7 @@ import numpy as np from numpy.exceptions import AxisError -from . import multiarray, numerictypes, overrides, shape_base, umath -from . import numerictypes as nt +from . 
import multiarray, numerictypes, numerictypes as nt, overrides, shape_base, umath from ._ufunc_config import errstate from .multiarray import ( # noqa: F401 ALLOW_THREADS, diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index b54fa856b007..c73a2694df3c 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -3,6 +3,7 @@ from collections.abc import Callable, Sequence from typing import ( Any, Final, + Literal as L, Never, NoReturn, SupportsAbs, @@ -13,7 +14,6 @@ from typing import ( Unpack, overload, ) -from typing import Literal as L import numpy as np from numpy import ( @@ -116,15 +116,15 @@ from .fromnumeric import ( transpose, var, ) -from .multiarray import ALLOW_THREADS as ALLOW_THREADS -from .multiarray import BUFSIZE as BUFSIZE -from .multiarray import CLIP as CLIP -from .multiarray import MAXDIMS as MAXDIMS -from .multiarray import MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS -from .multiarray import MAY_SHARE_EXACT as MAY_SHARE_EXACT -from .multiarray import RAISE as RAISE -from .multiarray import WRAP as WRAP from .multiarray import ( + ALLOW_THREADS as ALLOW_THREADS, + BUFSIZE as BUFSIZE, + CLIP as CLIP, + MAXDIMS as MAXDIMS, + MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT as MAY_SHARE_EXACT, + RAISE as RAISE, + WRAP as WRAP, _Array, _ConstructorEmpty, _KwargsEmpty, @@ -156,6 +156,7 @@ from .multiarray import ( ndarray, nditer, nested_iters, + normalize_axis_index as normalize_axis_index, promote_types, putmask, result_type, @@ -164,7 +165,6 @@ from .multiarray import ( where, zeros, ) -from .multiarray import normalize_axis_index as normalize_axis_index from .numerictypes import ( ScalarType, bool, diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index b649b8f91cd1..4f810da6904b 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,6 +1,5 @@ from builtins import bool as py_bool -from typing import Final, TypedDict, type_check_only -from typing import Literal as L +from 
typing import Final, Literal as L, TypedDict, type_check_only import numpy as np from numpy import ( diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 39bcf4ba6294..9a6af16e3b23 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -8,8 +8,7 @@ from numpy._utils import set_module -from . import numeric as sb -from . import numerictypes as nt +from . import numeric as sb, numerictypes as nt from .arrayprint import _get_legacy_print_mode # All of the functions allow formats to be a dtype diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index c2a0f0dae789..39de8739db0e 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -5,9 +5,7 @@ import itertools import operator -from . import fromnumeric as _from_nx -from . import numeric as _nx -from . import overrides +from . import fromnumeric as _from_nx, numeric as _nx, overrides from .multiarray import array, asanyarray, normalize_axis_index array_function_dispatch = functools.partial( diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index b4dc1656024f..4d56f1e0c779 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -14,10 +14,8 @@ greater_equal, less, less_equal, - not_equal, -) -from numpy import ( multiply as _multiply_ufunc, + not_equal, ) from numpy._core.multiarray import _vec_string from numpy._core.overrides import array_function_dispatch, set_module @@ -40,6 +38,10 @@ _strip_chars, _strip_whitespace, _zfill, + count as _count_ufunc, + endswith as _endswith_ufunc, + find as _find_ufunc, + index as _index_ufunc, isalnum, isalpha, isdecimal, @@ -49,28 +51,10 @@ isspace, istitle, isupper, - str_len, -) -from numpy._core.umath import ( - count as _count_ufunc, -) -from numpy._core.umath import ( - endswith as _endswith_ufunc, -) -from numpy._core.umath import ( - find as _find_ufunc, -) -from numpy._core.umath import ( - index as _index_ufunc, -) -from numpy._core.umath import ( rfind as _rfind_ufunc, -) -from 
numpy._core.umath import ( rindex as _rindex_ufunc, -) -from numpy._core.umath import ( startswith as _startswith_ufunc, + str_len, ) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index b187ce71d25c..1b8bfd84cc3d 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,12 +1,16 @@ from typing import TypeAlias, overload import numpy as np -from numpy._typing import NDArray, _AnyShape, _SupportsArray -from numpy._typing import _ArrayLikeAnyString_co as UST_co -from numpy._typing import _ArrayLikeBytes_co as S_co -from numpy._typing import _ArrayLikeInt_co as i_co -from numpy._typing import _ArrayLikeStr_co as U_co -from numpy._typing import _ArrayLikeString_co as T_co +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _SupportsArray, +) __all__ = [ "add", diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index 0c49ec00277e..ededced3b9fe 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -18,8 +18,6 @@ def func(arg1, /, arg2, *, arg3): import numpy as np from numpy._core._multiarray_tests import ( argparse_example_function as func, -) -from numpy._core._multiarray_tests import ( threaded_argparse_example_function as thread_func, ) from numpy.testing import IS_WASM diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 3336286d8c98..4d2082a949b7 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -5,8 +5,8 @@ import numpy as np from numpy._core._multiarray_umath import ( _discover_array_parameters as discover_array_params, + _get_sfloat_dtype, ) -from numpy._core._multiarray_umath import _get_sfloat_dtype from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() diff --git 
a/numpy/_core/tests/test_indexerrors.py b/numpy/_core/tests/test_indexerrors.py index 02110c28356a..70e97dd6428e 100644 --- a/numpy/_core/tests/test_indexerrors.py +++ b/numpy/_core/tests/test_indexerrors.py @@ -1,8 +1,5 @@ import numpy as np -from numpy.testing import ( - assert_raises, - assert_raises_regex, -) +from numpy.testing import assert_raises, assert_raises_regex class TestIndexErrors: diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 65da65ddc9f9..4ead2fc7ec6f 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -6,8 +6,7 @@ from decimal import Decimal import pytest -from hypothesis import given -from hypothesis import strategies as st +from hypothesis import given, strategies as st from hypothesis.extra import numpy as hynp import numpy as np diff --git a/numpy/_core/tests/test_scalar_ctors.py b/numpy/_core/tests/test_scalar_ctors.py index be3ef0459c82..ea78cd9d9f51 100644 --- a/numpy/_core/tests/test_scalar_ctors.py +++ b/numpy/_core/tests/test_scalar_ctors.py @@ -4,11 +4,7 @@ import pytest import numpy as np -from numpy.testing import ( - assert_almost_equal, - assert_equal, - assert_warns, -) +from numpy.testing import assert_almost_equal, assert_equal, assert_warns class TestFromString: diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 1c2b606dedf3..8197822cd0e3 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -8,8 +8,7 @@ import pytest import numpy as np -from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype -from numpy._core.tests._natype import pd_NA +from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype, pd_NA from numpy.dtypes import StringDType from numpy.testing import IS_PYPY, assert_array_equal diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 001a7bffbcc8..c54b2ac86bc2 100644 --- 
a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -12,8 +12,7 @@ import numpy as np import numpy._core.umath as ncu -from numpy._core import _umath_tests as ncu_tests -from numpy._core import sctypes +from numpy._core import _umath_tests as ncu_tests, sctypes from numpy.testing import ( HAS_REFCOUNT, IS_MUSL, diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 16a7eee66ebd..0044749e3dce 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,148 +1,158 @@ """Private counterpart of ``numpy.typing``.""" -from ._array_like import ArrayLike as ArrayLike -from ._array_like import NDArray as NDArray -from ._array_like import _ArrayLike as _ArrayLike -from ._array_like import _ArrayLikeAnyString_co as _ArrayLikeAnyString_co -from ._array_like import _ArrayLikeBool_co as _ArrayLikeBool_co -from ._array_like import _ArrayLikeBytes_co as _ArrayLikeBytes_co -from ._array_like import _ArrayLikeComplex128_co as _ArrayLikeComplex128_co -from ._array_like import _ArrayLikeComplex_co as _ArrayLikeComplex_co -from ._array_like import _ArrayLikeDT64_co as _ArrayLikeDT64_co -from ._array_like import _ArrayLikeFloat64_co as _ArrayLikeFloat64_co -from ._array_like import _ArrayLikeFloat_co as _ArrayLikeFloat_co -from ._array_like import _ArrayLikeInt as _ArrayLikeInt -from ._array_like import _ArrayLikeInt_co as _ArrayLikeInt_co -from ._array_like import _ArrayLikeNumber_co as _ArrayLikeNumber_co -from ._array_like import _ArrayLikeObject_co as _ArrayLikeObject_co -from ._array_like import _ArrayLikeStr_co as _ArrayLikeStr_co -from ._array_like import _ArrayLikeString_co as _ArrayLikeString_co -from ._array_like import _ArrayLikeTD64_co as _ArrayLikeTD64_co -from ._array_like import _ArrayLikeUInt_co as _ArrayLikeUInt_co -from ._array_like import _ArrayLikeVoid_co as _ArrayLikeVoid_co -from ._array_like import _FiniteNestedSequence as _FiniteNestedSequence -from ._array_like import _SupportsArray as _SupportsArray -from 
._array_like import _SupportsArrayFunc as _SupportsArrayFunc +from ._array_like import ( + ArrayLike as ArrayLike, + NDArray as NDArray, + _ArrayLike as _ArrayLike, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeString_co as _ArrayLikeString_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, +) # -from ._char_codes import _BoolCodes as _BoolCodes -from ._char_codes import _ByteCodes as _ByteCodes -from ._char_codes import _BytesCodes as _BytesCodes -from ._char_codes import _CDoubleCodes as _CDoubleCodes -from ._char_codes import _CharacterCodes as _CharacterCodes -from ._char_codes import _CLongDoubleCodes as _CLongDoubleCodes -from ._char_codes import _Complex64Codes as _Complex64Codes -from ._char_codes import _Complex128Codes as _Complex128Codes -from ._char_codes import _ComplexFloatingCodes as _ComplexFloatingCodes -from ._char_codes import _CSingleCodes as _CSingleCodes -from ._char_codes import _DoubleCodes as _DoubleCodes -from ._char_codes import _DT64Codes as _DT64Codes -from ._char_codes import _FlexibleCodes as _FlexibleCodes -from ._char_codes import _Float16Codes as _Float16Codes -from ._char_codes import _Float32Codes as _Float32Codes -from ._char_codes import _Float64Codes as _Float64Codes -from ._char_codes 
import _FloatingCodes as _FloatingCodes -from ._char_codes import _GenericCodes as _GenericCodes -from ._char_codes import _HalfCodes as _HalfCodes -from ._char_codes import _InexactCodes as _InexactCodes -from ._char_codes import _Int8Codes as _Int8Codes -from ._char_codes import _Int16Codes as _Int16Codes -from ._char_codes import _Int32Codes as _Int32Codes -from ._char_codes import _Int64Codes as _Int64Codes -from ._char_codes import _IntCCodes as _IntCCodes -from ._char_codes import _IntCodes as _IntCodes -from ._char_codes import _IntegerCodes as _IntegerCodes -from ._char_codes import _IntPCodes as _IntPCodes -from ._char_codes import _LongCodes as _LongCodes -from ._char_codes import _LongDoubleCodes as _LongDoubleCodes -from ._char_codes import _LongLongCodes as _LongLongCodes -from ._char_codes import _NumberCodes as _NumberCodes -from ._char_codes import _ObjectCodes as _ObjectCodes -from ._char_codes import _ShortCodes as _ShortCodes -from ._char_codes import _SignedIntegerCodes as _SignedIntegerCodes -from ._char_codes import _SingleCodes as _SingleCodes -from ._char_codes import _StrCodes as _StrCodes -from ._char_codes import _StringCodes as _StringCodes -from ._char_codes import _TD64Codes as _TD64Codes -from ._char_codes import _UByteCodes as _UByteCodes -from ._char_codes import _UInt8Codes as _UInt8Codes -from ._char_codes import _UInt16Codes as _UInt16Codes -from ._char_codes import _UInt32Codes as _UInt32Codes -from ._char_codes import _UInt64Codes as _UInt64Codes -from ._char_codes import _UIntCCodes as _UIntCCodes -from ._char_codes import _UIntCodes as _UIntCodes -from ._char_codes import _UIntPCodes as _UIntPCodes -from ._char_codes import _ULongCodes as _ULongCodes -from ._char_codes import _ULongLongCodes as _ULongLongCodes -from ._char_codes import _UnsignedIntegerCodes as _UnsignedIntegerCodes -from ._char_codes import _UShortCodes as _UShortCodes -from ._char_codes import _VoidCodes as _VoidCodes +from ._char_codes import ( + _BoolCodes 
as _BoolCodes, + _ByteCodes as _ByteCodes, + _BytesCodes as _BytesCodes, + _CDoubleCodes as _CDoubleCodes, + _CharacterCodes as _CharacterCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _CSingleCodes as _CSingleCodes, + _DoubleCodes as _DoubleCodes, + _DT64Codes as _DT64Codes, + _FlexibleCodes as _FlexibleCodes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _FloatingCodes as _FloatingCodes, + _GenericCodes as _GenericCodes, + _HalfCodes as _HalfCodes, + _InexactCodes as _InexactCodes, + _Int8Codes as _Int8Codes, + _Int16Codes as _Int16Codes, + _Int32Codes as _Int32Codes, + _Int64Codes as _Int64Codes, + _IntCCodes as _IntCCodes, + _IntCodes as _IntCodes, + _IntegerCodes as _IntegerCodes, + _IntPCodes as _IntPCodes, + _LongCodes as _LongCodes, + _LongDoubleCodes as _LongDoubleCodes, + _LongLongCodes as _LongLongCodes, + _NumberCodes as _NumberCodes, + _ObjectCodes as _ObjectCodes, + _ShortCodes as _ShortCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _SingleCodes as _SingleCodes, + _StrCodes as _StrCodes, + _StringCodes as _StringCodes, + _TD64Codes as _TD64Codes, + _UByteCodes as _UByteCodes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, + _UIntCCodes as _UIntCCodes, + _UIntCodes as _UIntCodes, + _UIntPCodes as _UIntPCodes, + _ULongCodes as _ULongCodes, + _ULongLongCodes as _ULongLongCodes, + _UnsignedIntegerCodes as _UnsignedIntegerCodes, + _UShortCodes as _UShortCodes, + _VoidCodes as _VoidCodes, +) # -from ._dtype_like import DTypeLike as DTypeLike -from ._dtype_like import _DTypeLike as _DTypeLike -from ._dtype_like import _DTypeLikeBool as _DTypeLikeBool -from ._dtype_like import _DTypeLikeBytes as _DTypeLikeBytes -from ._dtype_like import _DTypeLikeComplex as _DTypeLikeComplex -from 
._dtype_like import _DTypeLikeComplex_co as _DTypeLikeComplex_co -from ._dtype_like import _DTypeLikeDT64 as _DTypeLikeDT64 -from ._dtype_like import _DTypeLikeFloat as _DTypeLikeFloat -from ._dtype_like import _DTypeLikeInt as _DTypeLikeInt -from ._dtype_like import _DTypeLikeObject as _DTypeLikeObject -from ._dtype_like import _DTypeLikeStr as _DTypeLikeStr -from ._dtype_like import _DTypeLikeTD64 as _DTypeLikeTD64 -from ._dtype_like import _DTypeLikeUInt as _DTypeLikeUInt -from ._dtype_like import _DTypeLikeVoid as _DTypeLikeVoid -from ._dtype_like import _SupportsDType as _SupportsDType -from ._dtype_like import _VoidDTypeLike as _VoidDTypeLike +from ._dtype_like import ( + DTypeLike as DTypeLike, + _DTypeLike as _DTypeLike, + _DTypeLikeBool as _DTypeLikeBool, + _DTypeLikeBytes as _DTypeLikeBytes, + _DTypeLikeComplex as _DTypeLikeComplex, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, + _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeInt as _DTypeLikeInt, + _DTypeLikeObject as _DTypeLikeObject, + _DTypeLikeStr as _DTypeLikeStr, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeVoid as _DTypeLikeVoid, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, +) # -from ._nbit import _NBitByte as _NBitByte -from ._nbit import _NBitDouble as _NBitDouble -from ._nbit import _NBitHalf as _NBitHalf -from ._nbit import _NBitInt as _NBitInt -from ._nbit import _NBitIntC as _NBitIntC -from ._nbit import _NBitIntP as _NBitIntP -from ._nbit import _NBitLong as _NBitLong -from ._nbit import _NBitLongDouble as _NBitLongDouble -from ._nbit import _NBitLongLong as _NBitLongLong -from ._nbit import _NBitShort as _NBitShort -from ._nbit import _NBitSingle as _NBitSingle +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitDouble as _NBitDouble, + _NBitHalf as _NBitHalf, + _NBitInt as _NBitInt, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitLong as _NBitLong, + _NBitLongDouble as 
_NBitLongDouble, + _NBitLongLong as _NBitLongLong, + _NBitShort as _NBitShort, + _NBitSingle as _NBitSingle, +) # from ._nbit_base import ( NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, ) -from ._nbit_base import _8Bit as _8Bit -from ._nbit_base import _16Bit as _16Bit -from ._nbit_base import _32Bit as _32Bit -from ._nbit_base import _64Bit as _64Bit -from ._nbit_base import _96Bit as _96Bit -from ._nbit_base import _128Bit as _128Bit # from ._nested_sequence import _NestedSequence as _NestedSequence # -from ._scalars import _BoolLike_co as _BoolLike_co -from ._scalars import _CharLike_co as _CharLike_co -from ._scalars import _ComplexLike_co as _ComplexLike_co -from ._scalars import _FloatLike_co as _FloatLike_co -from ._scalars import _IntLike_co as _IntLike_co -from ._scalars import _NumberLike_co as _NumberLike_co -from ._scalars import _ScalarLike_co as _ScalarLike_co -from ._scalars import _TD64Like_co as _TD64Like_co -from ._scalars import _UIntLike_co as _UIntLike_co -from ._scalars import _VoidLike_co as _VoidLike_co +from ._scalars import ( + _BoolLike_co as _BoolLike_co, + _CharLike_co as _CharLike_co, + _ComplexLike_co as _ComplexLike_co, + _FloatLike_co as _FloatLike_co, + _IntLike_co as _IntLike_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _TD64Like_co as _TD64Like_co, + _UIntLike_co as _UIntLike_co, + _VoidLike_co as _VoidLike_co, +) # -from ._shape import _AnyShape as _AnyShape -from ._shape import _Shape as _Shape -from ._shape import _ShapeLike as _ShapeLike +from ._shape import _AnyShape as _AnyShape, _Shape as _Shape, _ShapeLike as _ShapeLike # -from ._ufunc import _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1 -from ._ufunc import _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1 -from ._ufunc import _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2 -from ._ufunc import 
_UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1 -from ._ufunc import _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2 +from ._ufunc import ( + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, +) diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 21df1d983fe6..1ce7de5bb423 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -38,11 +38,7 @@ from . import NBitBase from ._array_like import NDArray from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -from ._scalars import ( - _BoolLike_co, - _IntLike_co, - _NumberLike_co, -) +from ._scalars import _BoolLike_co, _IntLike_co, _NumberLike_co _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index c406b3098384..526fb86dd322 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,12 +1,5 @@ from collections.abc import Sequence # noqa: F811 -from typing import ( - Any, - Protocol, - TypeAlias, - TypedDict, - TypeVar, - runtime_checkable, -) +from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar, runtime_checkable import numpy as np diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index 2ed4e88b3e32..b630777ced99 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -2,8 +2,7 @@ from _typeshed import IdentityFunction from collections.abc import Callable, Iterable from typing import Protocol, TypeVar, overload, type_check_only -from ._convertions import asbytes as asbytes -from ._convertions import asunicode as asunicode +from ._convertions import asbytes as asbytes, asunicode as asunicode ### diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 2c338d4e5b14..11ae02e57a59 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -5,14 +5,12 @@ 
from typing import ( ClassVar, Final, Generic, + Literal as L, NamedTuple, TypeVar, final, type_check_only, ) -from typing import ( - Literal as L, -) from typing_extensions import TypeIs __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] diff --git a/numpy/ctypeslib/__init__.pyi b/numpy/ctypeslib/__init__.pyi index adc51da2696c..f088d0281d33 100644 --- a/numpy/ctypeslib/__init__.pyi +++ b/numpy/ctypeslib/__init__.pyi @@ -3,31 +3,13 @@ from ctypes import c_int64 as _c_intp from ._ctypeslib import ( __all__ as __all__, -) -from ._ctypeslib import ( __doc__ as __doc__, -) -from ._ctypeslib import ( _concrete_ndptr as _concrete_ndptr, -) -from ._ctypeslib import ( _ndptr as _ndptr, -) -from ._ctypeslib import ( as_array as as_array, -) -from ._ctypeslib import ( as_ctypes as as_ctypes, -) -from ._ctypeslib import ( as_ctypes_type as as_ctypes_type, -) -from ._ctypeslib import ( c_intp as c_intp, -) -from ._ctypeslib import ( load_library as load_library, -) -from ._ctypeslib import ( ndpointer as ndpointer, ) diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index aecb3899bdf5..996b2c0be388 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -4,15 +4,7 @@ import ctypes from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from ctypes import c_int64 as _c_intp -from typing import ( - Any, - ClassVar, - Generic, - TypeAlias, - TypeVar, - overload, -) -from typing import Literal as L +from typing import Any, ClassVar, Generic, Literal as L, TypeAlias, TypeVar, overload import numpy as np from numpy import ( diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index f76b08fc28dc..36844a90d31f 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -2,6 +2,7 @@ from typing import ( Any, Generic, + Literal as L, LiteralString, Never, NoReturn, @@ -11,7 +12,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from 
typing_extensions import TypeVar import numpy as np diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi index d12f47e80a7d..aa7d5918f7d2 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -1,5 +1,4 @@ -from .f2py2e import main as main -from .f2py2e import run_main +from .f2py2e import main as main, run_main __all__ = ["get_include", "run_main"] diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index 67baf9b76845..bcb2b0304401 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable from pathlib import Path -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L from typing_extensions import override from ._backend import Backend diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index dfbae5c7d94d..32e381cf9a1c 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,8 +1,7 @@ from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show -from typing import Any, Final, Never, TypeAlias, TypeVar, overload -from typing import Literal as L +from typing import Any, Final, Literal as L, Never, TypeAlias, TypeVar, overload from .cfuncs import errmess diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index c5f4fd7585ba..742d358916a2 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,8 +1,17 @@ import re from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping -from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload -from typing import Literal as L +from typing import ( + IO, + Any, + Concatenate, + Final, + Literal as L, + Never, + ParamSpec, + TypeAlias, + overload, +) from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict diff --git 
a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 03aeffc5dcdd..46794e552b41 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -6,8 +6,7 @@ from typing import Any, Final, NotRequired, TypedDict, type_check_only from typing_extensions import TypeVar, override from .__version__ import version -from .auxfuncs import _Bool -from .auxfuncs import outmess as outmess +from .auxfuncs import _Bool, outmess as outmess ### diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index 58614060ba87..30439f6b8351 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Iterable, Mapping -from typing import Any, Final, TypeAlias -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeAlias from typing_extensions import TypeVar from .__version__ import version diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index e7b14f751dc3..06be2bb16044 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable, Mapping from enum import Enum -from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload -from typing import Literal as L +from typing import Any, Generic, Literal as L, ParamSpec, Self, TypeAlias, overload from typing_extensions import TypeVar __all__ = ["Expr"] diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index ce223a555456..ecf884fb4999 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -5,8 +5,6 @@ from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, -) -from numpy.f2py.crackfortran import ( _selected_real_kind_func as selected_real_kind, ) diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 54d0ea8c79b6..893a697f1398 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,9 +1,4 @@ -from ._helper import ( - fftfreq, - fftshift, - ifftshift, - rfftfreq, -) +from ._helper 
import fftfreq, fftshift, ifftshift, rfftfreq from ._pocketfft import ( fft, fft2, diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index d06bda7ad9a9..1ea451ec2eb1 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,5 +1,4 @@ -from typing import Any, Final, TypeVar, overload -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar, overload from numpy import complexfloating, floating, generic, integer from numpy._typing import ( diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 215cf14d1395..4f5e5c944b4c 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,6 +1,5 @@ from collections.abc import Sequence -from typing import Literal as L -from typing import TypeAlias +from typing import Literal as L, TypeAlias from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index 7cf391a12e1d..5147652172a6 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,5 +1,4 @@ -from typing import Any -from typing import Literal as L +from typing import Any, Literal as L from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 6185a494d035..5a85743e4d0f 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -3,28 +3,36 @@ from numpy._core.multiarray import add_docstring, tracemalloc_domain # all submodules of `lib` are accessible at runtime through `__getattr__`, # so we implicitly re-export them here -from . import _array_utils_impl as _array_utils_impl -from . import _arraypad_impl as _arraypad_impl -from . import _arraysetops_impl as _arraysetops_impl -from . import _arrayterator_impl as _arrayterator_impl -from . import _datasource as _datasource -from . import _format_impl as _format_impl -from . import _function_base_impl as _function_base_impl -from . 
import _histograms_impl as _histograms_impl -from . import _index_tricks_impl as _index_tricks_impl -from . import _iotools as _iotools -from . import _nanfunctions_impl as _nanfunctions_impl -from . import _npyio_impl as _npyio_impl -from . import _polynomial_impl as _polynomial_impl -from . import _scimath_impl as _scimath_impl -from . import _shape_base_impl as _shape_base_impl -from . import _stride_tricks_impl as _stride_tricks_impl -from . import _twodim_base_impl as _twodim_base_impl -from . import _type_check_impl as _type_check_impl -from . import _ufunclike_impl as _ufunclike_impl -from . import _utils_impl as _utils_impl -from . import _version as _version -from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks +from . import ( + _array_utils_impl as _array_utils_impl, + _arraypad_impl as _arraypad_impl, + _arraysetops_impl as _arraysetops_impl, + _arrayterator_impl as _arrayterator_impl, + _datasource as _datasource, + _format_impl as _format_impl, + _function_base_impl as _function_base_impl, + _histograms_impl as _histograms_impl, + _index_tricks_impl as _index_tricks_impl, + _iotools as _iotools, + _nanfunctions_impl as _nanfunctions_impl, + _npyio_impl as _npyio_impl, + _polynomial_impl as _polynomial_impl, + _scimath_impl as _scimath_impl, + _shape_base_impl as _shape_base_impl, + _stride_tricks_impl as _stride_tricks_impl, + _twodim_base_impl as _twodim_base_impl, + _type_check_impl as _type_check_impl, + _ufunclike_impl as _ufunclike_impl, + _utils_impl as _utils_impl, + _version as _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) from ._arrayterator_impl import Arrayterator from ._version import NumpyVersion diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index 46b43762b87f..6158f299efe2 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,22 +1,15 @@ from typing import ( Any, + Literal as L, Protocol, TypeAlias, 
TypeVar, overload, type_check_only, ) -from typing import ( - Literal as L, -) from numpy import generic -from numpy._typing import ( - ArrayLike, - NDArray, - _ArrayLike, - _ArrayLikeInt, -) +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt __all__ = ["pad"] diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 4279b809f78e..c0291680a8ec 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,5 +1,12 @@ -from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload -from typing import Literal as L +from typing import ( + Any, + Generic, + Literal as L, + NamedTuple, + SupportsIndex, + TypeAlias, + overload, +) from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 096043e6316f..f5690a829fc6 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -10,9 +10,14 @@ from numpy._core import overrides, transpose from numpy._core._multiarray_umath import _array_converter from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum -from numpy._core.multiarray import _monotonicity, _place, bincount, normalize_axis_index -from numpy._core.multiarray import interp as compiled_interp -from numpy._core.multiarray import interp_complex as compiled_interp_complex +from numpy._core.multiarray import ( + _monotonicity, + _place, + bincount, + interp as compiled_interp, + interp_complex as compiled_interp_complex, + normalize_axis_index, +) from numpy._core.numeric import ( absolute, arange, diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index cb6e18b53fa4..78947b1b3b46 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -4,6 +4,7 @@ from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Concatenate, + Literal as L, ParamSpec, 
Protocol, SupportsIndex, @@ -13,7 +14,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeIs, deprecated import numpy as np diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 5e7afb5e397b..06987dd71e6b 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,17 +1,7 @@ from collections.abc import Sequence -from typing import ( - Any, - SupportsIndex, - TypeAlias, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, SupportsIndex, TypeAlias -from numpy._typing import ( - ArrayLike, - NDArray, -) +from numpy._typing import ArrayLike, NDArray __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index c4509d9aa3ad..208a8e868b48 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,7 +1,16 @@ from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload -from typing import Literal as L +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + Self, + SupportsIndex, + final, + overload, +) from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_nanfunctions_impl.pyi b/numpy/lib/_nanfunctions_impl.pyi index f39800d58d07..fd5d277cbd7d 100644 --- a/numpy/lib/_nanfunctions_impl.pyi +++ b/numpy/lib/_nanfunctions_impl.pyi @@ -11,11 +11,7 @@ from numpy._core.fromnumeric import ( sum, var, ) -from numpy.lib._function_base_impl import ( - median, - percentile, - quantile, -) +from numpy.lib._function_base_impl import median, percentile, quantile __all__ = [ "nansum", diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 94f014ccd52d..253fcb0fdf9e 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -14,13 
+14,13 @@ from typing import ( Any, ClassVar, Generic, + Literal as L, Protocol, Self, TypeAlias, overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeVar, deprecated, override import numpy as np diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index faf2f01e6a22..3da1a2af60c9 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,5 +1,6 @@ from typing import ( Any, + Literal as L, NoReturn, SupportsIndex, SupportsInt, @@ -7,9 +8,6 @@ from typing import ( TypeVar, overload, ) -from typing import ( - Literal as L, -) import numpy as np from numpy import ( diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 43df38ed5b06..9e70d0a617f6 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,13 +1,5 @@ from collections.abc import Callable, Sequence -from typing import ( - Any, - TypeAlias, - TypeVar, - overload, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, TypeAlias, TypeVar, overload import numpy as np from numpy import ( diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index b9ab2a02f5f5..5e98ad22ca8b 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,7 +1,6 @@ from _typeshed import Incomplete from collections.abc import Container, Iterable -from typing import Any, Protocol, TypeAlias, overload, type_check_only -from typing import Literal as L +from typing import Any, Literal as L, Protocol, TypeAlias, overload, type_check_only from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/array_utils.pyi b/numpy/lib/array_utils.pyi index 8adc3c5b22a6..4b9ebe334a1f 100644 --- a/numpy/lib/array_utils.pyi +++ b/numpy/lib/array_utils.pyi @@ -1,12 +1,6 @@ from ._array_utils_impl import ( __all__ as __all__, -) -from ._array_utils_impl import ( byte_bounds as byte_bounds, -) -from 
._array_utils_impl import ( normalize_axis_index as normalize_axis_index, -) -from ._array_utils_impl import ( normalize_axis_tuple as normalize_axis_tuple, ) diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi index dd9470e1e6a3..c29e18fe0581 100644 --- a/numpy/lib/format.pyi +++ b/numpy/lib/format.pyi @@ -1,66 +1,24 @@ from ._format_impl import ( ARRAY_ALIGN as ARRAY_ALIGN, -) -from ._format_impl import ( BUFFER_SIZE as BUFFER_SIZE, -) -from ._format_impl import ( EXPECTED_KEYS as EXPECTED_KEYS, -) -from ._format_impl import ( GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, -) -from ._format_impl import ( MAGIC_LEN as MAGIC_LEN, -) -from ._format_impl import ( MAGIC_PREFIX as MAGIC_PREFIX, -) -from ._format_impl import ( __all__ as __all__, -) -from ._format_impl import ( __doc__ as __doc__, -) -from ._format_impl import ( descr_to_dtype as descr_to_dtype, -) -from ._format_impl import ( drop_metadata as drop_metadata, -) -from ._format_impl import ( dtype_to_descr as dtype_to_descr, -) -from ._format_impl import ( header_data_from_array_1_0 as header_data_from_array_1_0, -) -from ._format_impl import ( isfileobj as isfileobj, -) -from ._format_impl import ( magic as magic, -) -from ._format_impl import ( open_memmap as open_memmap, -) -from ._format_impl import ( read_array as read_array, -) -from ._format_impl import ( read_array_header_1_0 as read_array_header_1_0, -) -from ._format_impl import ( read_array_header_2_0 as read_array_header_2_0, -) -from ._format_impl import ( read_magic as read_magic, -) -from ._format_impl import ( write_array as write_array, -) -from ._format_impl import ( write_array_header_1_0 as write_array_header_1_0, -) -from ._format_impl import ( write_array_header_2_0 as write_array_header_2_0, ) diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index a7e4c93932c6..5526a332fead 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -65,8 +65,7 @@ def opt_func_info(func_name=None, signature=None): """ 
import re - from numpy._core._multiarray_umath import __cpu_targets_info__ as targets - from numpy._core._multiarray_umath import dtype + from numpy._core._multiarray_umath import __cpu_targets_info__ as targets, dtype if func_name is not None: func_pattern = re.compile(func_name) diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index 4f4801feac8f..f23c58fa6586 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,6 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any -from typing import Literal as L +from typing import Any, Literal as L from numpy import ufunc diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index 49fb4d1fc736..fd3ae8f5a287 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,9 +1,5 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, -) -from numpy.lib._npyio_impl import ( NpzFile as NpzFile, -) -from numpy.lib._npyio_impl import ( __doc__ as __doc__, ) diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi index 253235dfc576..ef2772a33a47 100644 --- a/numpy/lib/scimath.pyi +++ b/numpy/lib/scimath.pyi @@ -1,30 +1,12 @@ from ._scimath_impl import ( __all__ as __all__, -) -from ._scimath_impl import ( arccos as arccos, -) -from ._scimath_impl import ( arcsin as arcsin, -) -from ._scimath_impl import ( arctanh as arctanh, -) -from ._scimath_impl import ( log as log, -) -from ._scimath_impl import ( log2 as log2, -) -from ._scimath_impl import ( log10 as log10, -) -from ._scimath_impl import ( logn as logn, -) -from ._scimath_impl import ( power as power, -) -from ._scimath_impl import ( sqrt as sqrt, ) diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi index 42d8fe9ef43b..eb46f28ae5f4 100644 --- a/numpy/lib/stride_tricks.pyi +++ b/numpy/lib/stride_tricks.pyi @@ -1,6 +1,4 @@ from numpy.lib._stride_tricks_impl import ( as_strided as as_strided, -) -from numpy.lib._stride_tricks_impl import ( sliding_window_view as sliding_window_view, ) diff --git 
a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py index 1581ffbe95fd..548a3db2dc07 100644 --- a/numpy/lib/tests/test__iotools.py +++ b/numpy/lib/tests/test__iotools.py @@ -10,12 +10,7 @@ flatten_dtype, has_nested_fields, ) -from numpy.testing import ( - assert_, - assert_allclose, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_allclose, assert_equal, assert_raises class TestLineSplitter: diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 16c8048c1a11..53c115f7bd65 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,6 +1,4 @@ -from . import _linalg as _linalg -from . import _umath_linalg as _umath_linalg -from . import linalg as linalg +from . import _linalg as _linalg, _umath_linalg as _umath_linalg, linalg as linalg from ._linalg import ( cholesky, cond, diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index d7850c4a0204..52a2ffb8f50b 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -35,7 +35,9 @@ cdouble, complexfloating, count_nonzero, + cross as _core_cross, csingle, + diagonal as _core_diagonal, divide, dot, double, @@ -49,10 +51,13 @@ intp, isfinite, isnan, + matmul as _core_matmul, + matrix_transpose as _core_matrix_transpose, moveaxis, multiply, newaxis, object_, + outer as _core_outer, overrides, prod, reciprocal, @@ -62,34 +67,11 @@ sqrt, sum, swapaxes, - zeros, -) -from numpy._core import ( - cross as _core_cross, -) -from numpy._core import ( - diagonal as _core_diagonal, -) -from numpy._core import ( - matmul as _core_matmul, -) -from numpy._core import ( - matrix_transpose as _core_matrix_transpose, -) -from numpy._core import ( - outer as _core_outer, -) -from numpy._core import ( tensordot as _core_tensordot, -) -from numpy._core import ( trace as _core_trace, -) -from numpy._core import ( transpose as _core_transpose, -) -from numpy._core import ( vecdot as _core_vecdot, + zeros, ) from numpy._globals import _NoValue 
from numpy._typing import NDArray diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 3f318a892da5..68bd6d933921 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,6 +1,7 @@ from collections.abc import Iterable from typing import ( Any, + Literal as L, NamedTuple, Never, SupportsIndex, @@ -9,7 +10,6 @@ from typing import ( TypeVar, overload, ) -from typing import Literal as L import numpy as np from numpy import ( diff --git a/numpy/linalg/_umath_linalg.pyi b/numpy/linalg/_umath_linalg.pyi index cd07acdb1f9e..f90706a7b159 100644 --- a/numpy/linalg/_umath_linalg.pyi +++ b/numpy/linalg/_umath_linalg.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 05ea373a6a12..5e231fbbadb7 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -35,6 +35,7 @@ amax, amin, angle, + array as narray, # noqa: F401 bool_, expand_dims, finfo, # noqa: F401 @@ -42,7 +43,6 @@ iscomplexobj, ndarray, ) -from numpy import array as narray # noqa: F401 from numpy._core import multiarray as mu from numpy._core.numeric import normalize_axis_tuple from numpy._utils import set_module diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 094c1e26b191..381cf75f00c3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -23,8 +23,7 @@ import warnings import numpy as np -from numpy import array as nxarray -from numpy import ndarray +from numpy import array as nxarray, ndarray from numpy.lib._function_base_impl import _ureduce from numpy.lib._index_tricks_impl import AxisConcatenator from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index 0da915101511..e0b0db24904c 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -8,9 
+8,11 @@ import numpy as np import numpy.ma as ma -from numpy._core.records import fromarrays as recfromarrays -from numpy._core.records import fromrecords as recfromrecords -from numpy._core.records import recarray +from numpy._core.records import ( + fromarrays as recfromarrays, + fromrecords as recfromrecords, + recarray, +) from numpy.ma import masked, nomask from numpy.ma.mrecords import ( MaskedRecords, @@ -20,11 +22,7 @@ fromtextfile, mrecarray, ) -from numpy.ma.testutils import ( - assert_, - assert_equal, - assert_equal_records, -) +from numpy.ma.testutils import assert_, assert_equal, assert_equal_records from numpy.testing import temppath diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index 30c3311798fc..f5d6d3ec27b9 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -83,11 +83,7 @@ where, zeros, ) -from numpy.testing import ( - assert_, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_equal, assert_raises pi = np.pi diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py index 4e639653bda4..6ce03b36abde 100644 --- a/numpy/matrixlib/tests/test_matrix_linalg.py +++ b/numpy/matrixlib/tests/test_matrix_linalg.py @@ -12,13 +12,13 @@ PinvCases, SolveCases, SVDCases, + TestQR as _TestQR, _TestNorm2D, _TestNormDoubleBase, _TestNormInt64Base, _TestNormSingleBase, apply_tag, ) -from numpy.linalg.tests.test_linalg import TestQR as _TestQR CASES = [] diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index ec342df0f9d1..85c92816a261 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Iterable -from typing import Any, Concatenate, Final, Self, TypeVar, overload -from typing import Literal as L +from typing import Any, Concatenate, Final, Literal as L, Self, TypeVar, overload import numpy as np import numpy.typing as npt diff 
--git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index f7d907c1b39d..07db43d0c000 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,5 +1,4 @@ -from typing import Any, Final, TypeVar -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar import numpy as np diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index e8013e66b62f..94ad7248f268 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,5 +1,4 @@ -from typing import Any, Final, TypeVar -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar import numpy as np diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 6f67257a607c..a2b84f72bab7 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 35ea2ffd2bf2..d81f3e6f54a4 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index b4c784492b50..2942adf2afa8 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index c627e16dca1d..65ae4e5503b2 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,12 +1,5 @@ from collections.abc import Callable, Iterable, Sequence -from typing import ( - Final, - Literal, - SupportsIndex, - 
TypeAlias, - TypeVar, - overload, -) +from typing import Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 2cead454631c..14777ac60375 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises def trim(x): diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index d10aafbda866..156dccf6ea88 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -18,12 +18,7 @@ Legendre, Polynomial, ) -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises # # fixtures diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 8bd3951f4241..a289ba0b50cc 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises H0 = np.array([1]) H1 = np.array([0, 2]) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 29f34f66380e..233dfb28254a 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -6,12 +6,7 @@ 
import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises He0 = np.array([1]) He1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 6793b780416d..884f15a9fe8f 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) / 1 L1 = np.array([1, -1]) / 1 diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index d0ed7060cbe7..6c87f44ee707 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) L1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index 96e88b9de1fa..a6f5e3990b6b 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -3,12 +3,7 @@ """ import numpy as np import numpy.polynomial.polyutils as pu -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises class 
TestMisc: diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 6ccc6180657c..e71be8acd981 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -4,11 +4,7 @@ import numpy as np from numpy import random -from numpy.testing import ( - assert_, - assert_array_equal, - assert_raises, -) +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index 39b7d8c719ac..de52582c2b56 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -2,11 +2,7 @@ import numpy as np from numpy import random -from numpy.testing import ( - assert_, - assert_array_equal, - assert_raises, -) +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 59a7539b69f1..d5584e511796 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -14,6 +14,7 @@ from typing import ( ClassVar, Final, Generic, + Literal as L, NoReturn, ParamSpec, Self, @@ -23,7 +24,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeVar from unittest.case import SkipTest diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 3e6ded326941..9787bcbbc101 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -7,13 +7,7 @@ import pytest import numpy.exceptions as ex -from numpy.testing import ( - IS_WASM, - assert_, - assert_equal, - assert_raises, - assert_warns, -) +from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises, assert_warns def test_numpy_reloading(): diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 
d8ce95887bce..01b743941cf2 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -5,8 +5,7 @@ import os import subprocess import sys -from os.path import dirname, isfile -from os.path import join as pathjoin +from os.path import dirname, isfile, join as pathjoin import pytest diff --git a/ruff.toml b/ruff.toml index 7454c6c05e5b..61e849af1d69 100644 --- a/ruff.toml +++ b/ruff.toml @@ -130,3 +130,5 @@ builtins-allowed-modules = ["random", "typing"] # these are treated as stdlib within .pyi stubs extra-standard-library = ["_typeshed", "typing_extensions"] known-first-party = ["numpy"] +combine-as-imports = true +split-on-trailing-comma = false From 7311f0a55bdf6f363024467247bef4464df08e25 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 26 Jun 2025 03:08:15 +0200 Subject: [PATCH 151/294] MAINT: Fix ``I001`` ruff error on main (#29272) --- numpy/_core/__init__.pyi | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index a8884917f34a..ce5427bbfcd9 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -65,9 +65,9 @@ from .fromnumeric import ( take, trace, transpose, + transpose as permute_dims, var, ) -from .fromnumeric import transpose as permute_dims from .function_base import geomspace, linspace, logspace from .getlimits import finfo, iinfo from .memmap import memmap @@ -91,6 +91,7 @@ from .numeric import ( broadcast, can_cast, concatenate, + concatenate as concat, convolve, copyto, correlate, @@ -145,7 +146,6 @@ from .numeric import ( zeros, zeros_like, ) -from .numeric import concatenate as concat from .numerictypes import ( ScalarType, bool, @@ -228,14 +228,22 @@ from .shape_base import ( ) from .umath import ( absolute, + absolute as abs, add, arccos, + arccos as acos, arccosh, + arccosh as acosh, arcsin, + arcsin as asin, arcsinh, + arcsinh as asinh, arctan, + arctan as atan, arctan2, + arctan2 as atan2, arctanh, + 
arctanh as atanh, bitwise_and, bitwise_count, bitwise_or, @@ -272,6 +280,7 @@ from .umath import ( heaviside, hypot, invert, + invert as bitwise_invert, isfinite, isinf, isnan, @@ -279,6 +288,7 @@ from .umath import ( lcm, ldexp, left_shift, + left_shift as bitwise_left_shift, less, less_equal, log, @@ -303,11 +313,13 @@ from .umath import ( pi, positive, power, + power as pow, rad2deg, radians, reciprocal, remainder, right_shift, + right_shift as bitwise_right_shift, rint, sign, signbit, @@ -323,18 +335,6 @@ from .umath import ( trunc, vecmat, ) -from .umath import absolute as abs -from .umath import arccos as acos -from .umath import arccosh as acosh -from .umath import arcsin as asin -from .umath import arcsinh as asinh -from .umath import arctan as atan -from .umath import arctan2 as atan2 -from .umath import arctanh as atanh -from .umath import invert as bitwise_invert -from .umath import left_shift as bitwise_left_shift -from .umath import power as pow -from .umath import right_shift as bitwise_right_shift __all__ = [ "False_", From 004458d21696a2867ed8142b172ada4bb4607b03 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 26 Jun 2025 05:41:55 +0200 Subject: [PATCH 152/294] TYP: Work around a mypy issue with bool arrays (#29248) --- numpy/__init__.pyi | 6 +++--- numpy/typing/tests/data/reveal/array_constructors.pyi | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 3 +-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9fae513bdf39..d85c506ae659 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3848,14 +3848,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def imag(self) -> np.bool[L[False]]: ... - @overload - def __init__(self: np.bool[L[False]], /) -> None: ... + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __init__(self: np.bool[builtins.bool], value: Never, /) -> None: ... 
@overload def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... @overload def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... @overload - def __init__(self, value: object, /) -> None: ... + def __init__(self: np.bool[builtins.bool], value: object, /) -> None: ... def __bool__(self, /) -> _BoolItemT_co: ... @overload diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 7b27d57bfe23..45cc986d33a6 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -39,6 +39,8 @@ assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 60ee04218489..0fa657240f0b 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -357,8 +357,7 @@ assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) assert_type(np.ma.nomask, np.bool[Literal[False]]) -# https://github.com/python/mypy/issues/18974 -assert_type(np.ma.MaskType, type[np.bool]) # type: ignore[assert-type] +assert_type(np.ma.MaskType, type[np.bool]) assert_type(MAR_1d.__setmask__([True, False]), None) assert_type(MAR_1d.__setmask__(np.False_), None) From bd683804a22657972c3b63987ed2fcace5fbf1b8 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 26 Jun 2025 
16:58:22 +0100 Subject: [PATCH 153/294] TYP: Add overloads for `MaskedArray.__{div,rdiv,floordiv,rfloordiv}__` (#29271) --- numpy/__init__.pyi | 4 + numpy/ma/core.pyi | 134 +++++++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 165 ++++++++++++++++++++++++++ 3 files changed, 296 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d85c506ae659..e6a104d6b269 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3091,6 +3091,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__truediv__` @overload def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3122,6 +3123,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rtruediv__` @overload def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3151,6 +3153,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__floordiv__` @overload def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload @@ -3180,6 +3183,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rfloordiv__` @overload def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... 
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 6c0de39594dd..caea284bb566 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -30,6 +30,8 @@ from numpy import ( floating, generic, inexact, + int8, + int64, int_, integer, intp, @@ -260,16 +262,18 @@ _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] _MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] _MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] -_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] -_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] _MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] _MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @@ -742,10 +746,126 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __truediv__(self, other): ... - def __rtruediv__(self, other): ... - def __floordiv__(self, other): ... 
- def __rfloordiv__(self, other): ... + # Keep in sync with `ndarray.__truediv__` + @overload + def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __truediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __truediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `ndarray.__rtruediv__` + @overload + def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __rtruediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rtruediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[integer | floating], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rtruediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `ndarray.__floordiv__` + @overload + def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __floordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `ndarray.__rfloordiv__` + @overload + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[floating | integer], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __pow__(self, other, mod: None = None, /): ... def __rpow__(self, other, mod: None = None, /): ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 0fa657240f0b..a101ba0d48b0 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -50,6 +50,7 @@ MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] b: np.bool f4: np.float32 f: float +i: int assert_type(MAR_1d.shape, tuple[int]) @@ -719,3 +720,167 @@ assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] + +# MaskedArray "true" division + +assert_type(MAR_f8 / b, MaskedArray[np.float64]) +assert_type(MAR_f8 / i, MaskedArray[np.float64]) +assert_type(MAR_f8 / f, MaskedArray[np.float64]) + +assert_type(b / MAR_f8, MaskedArray[np.float64]) +assert_type(i / MAR_f8, MaskedArray[np.float64]) +assert_type(f / MAR_f8, MaskedArray[np.float64]) + +assert_type(MAR_b / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_b, Any) + +assert_type(MAR_u4 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_u4, MaskedArray[np.float64]) 
+assert_type(AR_LIKE_f / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_u4, Any) + +assert_type(MAR_i8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_i8, Any) + +assert_type(MAR_f8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_f8, Any) + +assert_type(MAR_td64 / AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_td64, MaskedArray[np.float64]) +assert_type(MAR_td64 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 / MAR_td64, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_td64, Any) + +assert_type(MAR_o / AR_LIKE_b, Any) +assert_type(MAR_o / AR_LIKE_u, Any) +assert_type(MAR_o / AR_LIKE_i, Any) +assert_type(MAR_o / AR_LIKE_f, Any) +assert_type(MAR_o / AR_LIKE_td64, Any) +assert_type(MAR_o / 
AR_LIKE_dt64, Any) +assert_type(MAR_o / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_o, Any) +assert_type(AR_LIKE_u / MAR_o, Any) +assert_type(AR_LIKE_i / MAR_o, Any) +assert_type(AR_LIKE_f / MAR_o, Any) +assert_type(AR_LIKE_td64 / MAR_o, Any) +assert_type(AR_LIKE_dt64 / MAR_o, Any) +assert_type(AR_LIKE_o / MAR_o, Any) + +# MaskedArray floor division + +assert_type(MAR_b // AR_LIKE_b, MaskedArray[np.int8]) +assert_type(MAR_b // AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_b, MaskedArray[np.int8]) +assert_type(AR_LIKE_u // MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i // MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_o // MAR_b, Any) + +assert_type(MAR_u4 // AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 // AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u // MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_u4, Any) + +assert_type(MAR_i8 // AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 // AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i // MAR_i8, 
MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_i8, Any) + +assert_type(MAR_f8 // AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 // MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_f8, Any) + +assert_type(MAR_td64 // AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_td64, MaskedArray[np.int64]) +assert_type(MAR_td64 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 // MAR_td64, MaskedArray[np.int64]) +assert_type(AR_LIKE_o // MAR_td64, Any) + +assert_type(MAR_o // AR_LIKE_b, Any) +assert_type(MAR_o // AR_LIKE_u, Any) +assert_type(MAR_o // AR_LIKE_i, Any) +assert_type(MAR_o // AR_LIKE_f, Any) +assert_type(MAR_o // AR_LIKE_td64, Any) +assert_type(MAR_o // AR_LIKE_dt64, Any) +assert_type(MAR_o // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_o, Any) +assert_type(AR_LIKE_u // MAR_o, Any) +assert_type(AR_LIKE_i // MAR_o, Any) +assert_type(AR_LIKE_f // MAR_o, Any) +assert_type(AR_LIKE_td64 // MAR_o, Any) +assert_type(AR_LIKE_dt64 // MAR_o, Any) +assert_type(AR_LIKE_o // MAR_o, Any) From 6eeaae25ddfacf586ee02ddc600618127fa79cfd Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 26 Jun 2025 19:42:09 +0100 Subject: [PATCH 154/294] TYP: fix overloads 
where `out: _ArrayT` was typed as being the default (#29278) --- numpy/__init__.pyi | 34 +++++++++- numpy/_core/multiarray.pyi | 62 +++++++++++++++++-- .../tests/data/reveal/array_constructors.pyi | 2 +- 3 files changed, 88 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e6a104d6b269..d12e63a8bed9 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2401,7 +2401,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, ) -> _ArrayT: ... @overload @@ -2425,7 +2435,16 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... @@ -3655,7 +3674,16 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index aa9e796da10b..560822d68466 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -528,7 +528,6 @@ def concatenate( # type: ignore[misc] casting: _CastingKind | None = ... ) -> NDArray[_ScalarT]: ... 
@overload -@overload def concatenate( # type: ignore[misc] arrays: SupportsLenAndGetItem[ArrayLike], /, @@ -553,7 +552,17 @@ def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + dtype: DTypeLike = ..., + casting: _CastingKind | None = ... +) -> _ArrayT: ... +@overload +def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: _ArrayT, *, dtype: DTypeLike = ..., casting: _CastingKind | None = ... @@ -1094,7 +1103,17 @@ def busday_count( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_count( + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @@ -1126,7 +1145,18 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... 
@overload def busday_offset( # type: ignore[misc] @@ -1156,7 +1186,18 @@ def busday_offset( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... @overload @@ -1181,7 +1222,16 @@ def is_busday( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def is_busday( + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 45cc986d33a6..766547e54b60 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -53,7 +53,7 @@ assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) assert_type(np.concatenate(A), npt.NDArray[np.float64]) -assert_type(np.concatenate([A, A]), Any) # pyright correctly infers this as NDArray[float64] +assert_type(np.concatenate([A, A]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) From 202df739922a994227771c31371669f50b5b1e27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 11:36:33 -0600 Subject: [PATCH 155/294] MAINT: Bump github/codeql-action from 3.29.0 to 3.29.1 (#29285) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.0 to 3.29.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/ce28f5bb42b7a9f2c824e633a3f6ee835bab6858...39edc492dbe16b1465b0cafca41432d857bdb31a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 0342fd92c924..1b8f958b5a21 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index e64789006e2c..c23baa2f2b42 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v2.1.27 + uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v2.1.27 with: sarif_file: results.sarif From 0f18e7c42e4136cdb1eaa1e1c1eff331f2019950 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 27 Jun 2025 14:38:45 -0600 Subject: [PATCH 156/294] BUG: handle case in mapiter where descriptors might get replaced (#29286) Need to use the actual dtype, not the one passed to the array creation (because it can get replaced). Fixes #29279. --- numpy/_core/src/multiarray/mapping.c | 2 ++ numpy/_core/tests/test_stringdtype.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 7953e32fcbf0..2ce7dcdb234a 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -3013,6 +3013,8 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, if (extra_op == NULL) { goto fail; } + // extra_op_dtype might have been replaced, so get a new reference + extra_op_dtype = PyArray_DESCR(extra_op); } /* diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 8197822cd0e3..52a225619ccf 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -517,6 +517,18 @@ def test_fancy_indexing(string_list): assert_array_equal(a, b) assert a[0] == 'd' * 25 + # see gh-29279 + data = [ + ["AAAAAAAAAAAAAAAAA"], + ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"], + ["CCCCCCCCCCCCCCCCC"], + ["DDDDDDDDDDDDDDDDD"], + ] + sarr = np.array(data, dtype=np.dtypes.StringDType()) + uarr = np.array(data, dtype="U30") + for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 3]], [[1, 1]]]: + assert_array_equal(sarr[ind], uarr[ind]) + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) From 
fb7135f9a512d6e35544aa331a0e38f3d60863ac Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 28 Jun 2025 07:58:33 -0600 Subject: [PATCH 157/294] BUG: Fix macro redefinition (#29289) Also update .clang-format to indent preprocessor conditional blocks. --- .clang-format | 1 + numpy/_core/src/multiarray/alloc.c | 13 ++++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.clang-format b/.clang-format index 034478ae2466..7e94a6fdb47c 100644 --- a/.clang-format +++ b/.clang-format @@ -27,6 +27,7 @@ IncludeCategories: Priority: 1 - Regex: '^<[[:alnum:]_.]+"' Priority: 2 +IndentPPDirectives: AfterHash Language: Cpp PointerAlignment: Right ReflowComments: true diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index cc9c5762a196..8061feed24e5 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -30,14 +30,17 @@ /* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN * use-of-uninitialized-memory warnings less useful. 
*/ -#define USE_ALLOC_CACHE 1 #ifdef Py_GIL_DISABLED -# define USE_ALLOC_CACHE 0 +# define USE_ALLOC_CACHE 0 #elif defined(__has_feature) -# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) -# define USE_ALLOC_CACHE 0 -# endif +# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) +# define USE_ALLOC_CACHE 0 +# endif #endif +#ifndef USE_ALLOC_CACHE +# define USE_ALLOC_CACHE 1 +#endif + # define NBUCKETS 1024 /* number of buckets for data*/ # define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ From 2bb4005d189667b71a6088db358f8848802f36e9 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sat, 28 Jun 2025 17:49:51 +0300 Subject: [PATCH 158/294] DOC: avoid searching some directories for doxygen-commented source code (#29275) * DOC: avoid searching some directories for doxygen-commented source code * lint [skip azp][skip actions][skip cirrus] * use known paths [skip azp][skip actions][skip cirrus] --- doc/preprocess.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/doc/preprocess.py b/doc/preprocess.py index b2e64ab6393a..bc43e89764f8 100755 --- a/doc/preprocess.py +++ b/doc/preprocess.py @@ -4,7 +4,7 @@ def main(): - doxy_gen(os.path.abspath(os.path.join('..'))) + doxy_gen(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) def doxy_gen(root_path): """ @@ -24,6 +24,7 @@ def doxy_gen(root_path): class DoxyTpl(Template): delimiter = '@' + def doxy_config(root_path): """ Fetch all Doxygen sub-config files and gather it with the main config file. 
@@ -35,13 +36,14 @@ def doxy_config(root_path): conf = DoxyTpl(fd.read()) confs.append(conf.substitute(CUR_DIR=dsrc_path, **sub)) - for dpath, _, files in os.walk(root_path): - if ".doxyfile" not in files: - continue - conf_path = os.path.join(dpath, ".doxyfile") - with open(conf_path) as fd: - conf = DoxyTpl(fd.read()) - confs.append(conf.substitute(CUR_DIR=dpath, **sub)) + for subdir in ["doc", "numpy"]: + for dpath, _, files in os.walk(os.path.join(root_path, subdir)): + if ".doxyfile" not in files: + continue + conf_path = os.path.join(dpath, ".doxyfile") + with open(conf_path) as fd: + conf = DoxyTpl(fd.read()) + confs.append(conf.substitute(CUR_DIR=dpath, **sub)) return confs From 907b73ce9befb95deb5d0c2097bf3f307fd64cac Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 29 Jun 2025 21:22:53 +0200 Subject: [PATCH 159/294] MAIN: Enforce ruff E501 rule --- numpy/_core/tests/test_multithreading.py | 13 ++++++++----- ruff.toml | 1 - 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 09f907561ae5..5f8a2f11ea1f 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -272,12 +272,15 @@ def closure(b): def test_nonzero(dtype): # See: gh-28361 # - # np.nonzero uses np.count_nonzero to determine the size of the output array - # In a second pass the indices of the non-zero elements are determined, but they can have changed + # np.nonzero uses np.count_nonzero to determine the size of the output. + # array. In a second pass the indices of the non-zero elements are + # determined, but they can have changed # - # This test triggers a data race which is suppressed in the TSAN CI. The test is to ensure - # np.nonzero does not generate a segmentation fault + # This test triggers a data race which is suppressed in the TSAN CI. 
+ # The test is to ensure np.nonzero does not generate a segmentation fault x = np.random.randint(4, size=100).astype(dtype) + expected_warning = ('number of non-zero array elements changed' + ' during function execution') def func(index): for _ in range(10): @@ -287,6 +290,6 @@ def func(index): try: _ = np.nonzero(x) except RuntimeError as ex: - assert 'number of non-zero array elements changed during function execution' in str(ex) + assert expected_warning in str(ex) run_threaded(func, max_workers=10, pass_count=True, outer_iterations=5) diff --git a/ruff.toml b/ruff.toml index 8db6df67377a..323f8cc5b3b9 100644 --- a/ruff.toml +++ b/ruff.toml @@ -81,7 +81,6 @@ ignore = [ "numpy/_core/tests/test_defchararray.py" = ["E501"] "numpy/_core/tests/test_einsum.py" = ["E501"] "numpy/_core/tests/test_multiarray.py" = ["E501"] -"numpy/_core/tests/test_multithreading.py" = ["E501"] "numpy/_core/tests/test_nditer*py" = ["E501"] "numpy/_core/tests/test_ufunc*py" = ["E501"] "numpy/_core/tests/test_umath*py" = ["E501"] From 6aeb7c6e5d7a40bc49ed2cb5980d5a706583bc26 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 29 Jun 2025 22:50:06 +0200 Subject: [PATCH 160/294] More linting --- numpy/_core/tests/test_datetime.py | 6 ++-- numpy/_core/tests/test_simd_module.py | 6 ++-- numpy/_core/tests/test_strings.py | 42 ++++++++++++++++++--------- ruff.toml | 4 +-- 4 files changed, 37 insertions(+), 21 deletions(-) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 81ac9778971b..7ccb080a75c1 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -874,9 +874,11 @@ def test_pickle(self): def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_raises(ValueError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, 1)) assert_(dt.__reduce__()[2] == 
np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_raises(TypeError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) def test_dtype_promotion(self): diff --git a/numpy/_core/tests/test_simd_module.py b/numpy/_core/tests/test_simd_module.py index dca83fd427b6..3de1596aa10a 100644 --- a/numpy/_core/tests/test_simd_module.py +++ b/numpy/_core/tests/test_simd_module.py @@ -23,7 +23,8 @@ int_sfx = unsigned_sfx + signed_sfx all_sfx = unsigned_sfx + int_sfx -@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") +@pytest.mark.skipif(not npyv, + reason="could not find any SIMD extension with NPYV support") class Test_SIMD_MODULE: @pytest.mark.parametrize('sfx', all_sfx) @@ -47,7 +48,8 @@ def test_raises(self): pytest.raises(TypeError, vcb("setall"), [1]) pytest.raises(TypeError, vcb("load"), 1) pytest.raises(ValueError, vcb("load"), [1]) - pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + value = getattr(npyv, f"reinterpret_{sfx}_u32")(a) + pytest.raises(ValueError, vcb("store"), [1], value) @pytest.mark.skipif(not npyv2, reason=( "could not find a second SIMD extension with NPYV support" diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 56e928df4d7b..1b77a535eee6 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -989,22 +989,36 @@ def test_slice_unsupported(self, dt): with pytest.raises(TypeError, match="did not contain a loop"): np.strings.slice(np.array([1, 2, 3]), 4) - with pytest.raises(TypeError, match=r"Cannot cast ufunc '_slice' input .* from .* to dtype\('int(64|32)'\)"): - np.strings.slice(np.array(['foo', 'bar'], dtype=dt), np.array(['foo', 'bar'], dtype=dt)) - - @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, 
np.int64, - np.uint8, np.uint16, np.uint32, np.uint64]) + regexp = (r"Cannot cast ufunc '_slice' input .* " + r"from .* to dtype\('int(64|32)'\)") + with pytest.raises(TypeError, match=regexp): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), + np.array(['foo', 'bar'], dtype=dt)) + + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, + np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]) def test_slice_int_type_promotion(self, int_dt, dt): buf = np.array(["hello", "world"], dtype=dt) - - assert_array_equal(np.strings.slice(buf, int_dt(4)), np.array(["hell", "worl"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([4, 4], dtype=int_dt)), np.array(["hell", "worl"], dtype=dt)) - - assert_array_equal(np.strings.slice(buf, int_dt(2), int_dt(4)), np.array(["ll", "rl"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([2, 2], dtype=int_dt), np.array([4, 4], dtype=int_dt)), np.array(["ll", "rl"], dtype=dt)) - - assert_array_equal(np.strings.slice(buf, int_dt(0), int_dt(4), int_dt(2)), np.array(["hl", "wr"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([0, 0], dtype=int_dt), np.array([4, 4], dtype=int_dt), np.array([2, 2], dtype=int_dt)), np.array(["hl", "wr"], dtype=dt)) + np_slice = np.strings.slice + assert_array_equal(np_slice(buf, int_dt(4)), + np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([4, 4], dtype=int_dt)), + np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(2), int_dt(4)), + np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([2, 2], dtype=int_dt), + np.array([4, 4], dtype=int_dt)), + np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(0), int_dt(4), int_dt(2)), + np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np_slice(buf, + np.array([0, 0], dtype=int_dt), + np.array([4, 4], dtype=int_dt), + np.array([2, 2], dtype=int_dt)), + np.array(["hl", "wr"], dtype=dt)) 
@pytest.mark.parametrize("dt", ["U", "T"]) class TestMethodsWithUnicode: diff --git a/ruff.toml b/ruff.toml index 323f8cc5b3b9..9916097420b6 100644 --- a/ruff.toml +++ b/ruff.toml @@ -76,7 +76,6 @@ ignore = [ "numpy/_core/tests/test_arrayprint.py" = ["E501"] "numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] -"numpy/_core/tests/test_datetime.py" = ["E501"] "numpy/_core/tests/test_dtype.py" = ["E501"] "numpy/_core/tests/test_defchararray.py" = ["E501"] "numpy/_core/tests/test_einsum.py" = ["E501"] @@ -86,8 +85,7 @@ ignore = [ "numpy/_core/tests/test_umath*py" = ["E501"] "numpy/_core/tests/test_numeric*.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_simd*.py" = ["E501"] -"numpy/_core/tests/test_strings.py" = ["E501"] +"numpy/_core/tests/test_simd.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] From 7bdf6b4b9ed4976c0e6df512d95f09ea87aaa7f0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 29 Jun 2025 14:52:03 -0600 Subject: [PATCH 161/294] BUG: Fix version check in blas_utils.c Co-Authored-By: Daniel Bertalan --- numpy/_core/src/common/blas_utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index aaf976ed70e4..155963891071 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -43,7 +43,7 @@ is_macOS_version_15_4_or_later(void){ goto cleanup; } - if(major >= 15 && minor >= 4){ + if (major > 15 || (major == 15 && minor >= 4)) { ret = true; } From c96fcc8f7a5030098f8e0c304ce20864c3509021 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jun 2025 21:07:46 -0600 Subject: [PATCH 162/294] MAINT: Bump github/codeql-action from 3.29.1 to 3.29.2 (#29296) Bumps 
[github/codeql-action](https://github.com/github/codeql-action) from 3.29.1 to 3.29.2. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/39edc492dbe16b1465b0cafca41432d857bdb31a...181d5eefc20863364f96762470ba6f862bdef56b) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1b8f958b5a21..cbee65f0b713 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c23baa2f2b42..aa3bf1aec7cc 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v2.1.27 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v2.1.27 with: sarif_file: results.sarif From c0c972ddf58b891d6b9851348413a29fd8e45624 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 1 Jul 2025 11:40:06 +0100 Subject: [PATCH 163/294] DOCS: Remove incorrect "Returns" section from `MaskedArray.sort` --- numpy/ma/core.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 5e231fbbadb7..2860bec848aa 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5812,11 +5812,6 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True, stable : bool, optional Only for compatibility with ``np.sort``. Ignored. - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - See Also -------- numpy.ndarray.sort : Method to sort an array in-place. 
From 82ae5ab13162e7c1e0c49cad6106d0d8cfa90fd1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 1 Jul 2025 12:24:22 +0200 Subject: [PATCH 164/294] DEP: Give a visible warning when `align=` to dtype is a non-bool This seems generally confusing. I would like to make it keyword only, but this already gives a warning when loading _very_ old pickles, meaning I am not quite sure we should change away from a warning quickly. We should fix things around pickling and start pickling in a way that makes it easier to move to keyword only arguments. (I suppose one could detect the case of `np.dtype(obj, False, True)` and assume it is via unpickling, but... I am assuming that it is OK to (eventually) break unpickling these 10+ year old files, but I am not in a rush to actually do so and go through with the deprecation. Signed-off-by: Sebastian Berg --- .../upcoming_changes/29301.deprecation.rst | 7 ++++ numpy/_core/src/common/npy_import.h | 1 + numpy/_core/src/multiarray/conversion_utils.c | 20 +++------ numpy/_core/src/multiarray/descriptor.c | 41 +++++++++++++------ numpy/_core/tests/test_datetime.py | 26 ++++++------ numpy/_core/tests/test_deprecations.py | 20 +++++++++ numpy/_core/tests/test_multiarray.py | 14 +++++++ numpy/_core/tests/test_regression.py | 12 +++++- numpy/lib/tests/test_format.py | 17 ++++---- 9 files changed, 111 insertions(+), 47 deletions(-) create mode 100644 doc/release/upcoming_changes/29301.deprecation.rst diff --git a/doc/release/upcoming_changes/29301.deprecation.rst b/doc/release/upcoming_changes/29301.deprecation.rst new file mode 100644 index 000000000000..e520b692458d --- /dev/null +++ b/doc/release/upcoming_changes/29301.deprecation.rst @@ -0,0 +1,7 @@ +``align=`` must be passed as boolean to ``np.dtype()`` +------------------------------------------------------ +When creating a new ``dtype`` a ``VisibleDeprecationWarning`` will be +given if ``align=`` is not a boolean. 
+This is mainly to prevent accidentally passing a subarray align flag where it +has no effect, such as ``np.dtype("f8", 3)`` instead of ``np.dtype(("f8", 3))``. +We strongly suggest to always pass ``align=`` as a keyword argument. diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 970efa8f549e..87944989e95d 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -41,6 +41,7 @@ typedef struct npy_runtime_imports_struct { PyObject *_std; PyObject *_sum; PyObject *_ufunc_doc_signature_formatter; + PyObject *_usefields; PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 5ada3e6e4faf..1994dd0ee8f7 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -438,15 +438,11 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags) NPY_NO_EXPORT int PyArray_BoolConverter(PyObject *object, npy_bool *val) { - if (PyObject_IsTrue(object)) { - *val = NPY_TRUE; - } - else { - *val = NPY_FALSE; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } @@ -460,15 +456,11 @@ PyArray_OptionalBoolConverter(PyObject *object, int *val) if (object == Py_None) { return NPY_SUCCEED; } - if (PyObject_IsTrue(object)) { - *val = 1; - } - else { - *val = 0; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index f520e3c4bceb..c32b0e5e3f9f 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1029,17 +1029,13 @@ 
_validate_object_field_overlap(_PyArray_LegacyDescr *dtype) static PyArray_Descr * _convert_from_field_dict(PyObject *obj, int align) { - PyObject *_numpy_internal; - PyArray_Descr *res; - - _numpy_internal = PyImport_ImportModule("numpy._core._internal"); - if (_numpy_internal == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_usefields", &npy_runtime_imports._usefields) < 0) { return NULL; } - res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, - "_usefields", "Oi", obj, align); - Py_DECREF(_numpy_internal); - return res; + + return (PyArray_Descr *)PyObject_CallFunctionObjArgs( + npy_runtime_imports._usefields, obj, align ? Py_True : Py_False, NULL); } /* @@ -2554,7 +2550,9 @@ arraydescr_new(PyTypeObject *subtype, return NULL; } - PyObject *odescr, *metadata=NULL; + PyObject *odescr; + PyObject *oalign = NULL; + PyObject *metadata = NULL; PyArray_Descr *conv; npy_bool align = NPY_FALSE; npy_bool copy = NPY_FALSE; @@ -2562,14 +2560,33 @@ arraydescr_new(PyTypeObject *subtype, static char *kwlist[] = {"dtype", "align", "copy", "metadata", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O!:dtype", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O!:dtype", kwlist, &odescr, - PyArray_BoolConverter, &align, + &oalign, PyArray_BoolConverter, ©, &PyDict_Type, &metadata)) { return NULL; } + if (oalign != NULL) { + /* + * In the future, reject non Python (or NumPy) boolean, including integers to avoid any + * possibility of thinking that an integer alignment makes sense here. + */ + if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool)) { + /* Deprecated 2025-07-01: NumPy 2.4 */ + if (PyErr_WarnFormat(npy_static_pydata.VisibleDeprecationWarning, 1, + "dtype(): align should be passed as Python or NumPy boolean but got `align=%.100R`. " + "Did you mean to pass a tuple to create a subarray type? 
(Deprecated NumPy 2.4)", + oalign) < 0) { + return NULL; + } + } + if (!PyArray_BoolConverter(oalign, &align)) { + return NULL; + } + } + conv = _convert_from_any(odescr, align); if (conv == NULL) { return NULL; diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 7ccb080a75c1..c5d2ca459e6a 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -858,18 +858,20 @@ def test_pickle(self): delta) # Check that loading pickles from 1.6 works - pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ - b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ - b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + with pytest.warns(np.exceptions.VisibleDeprecationWarning, + match=r".*align should be passed"): + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 7d4875d6d149..df27a1f9d076 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -453,3 +453,23 @@ def test_deprecated(self): struct_ufunc.add_triplet, "new docs" ) ) + + +class TestDTypeAlignBool(_VisibleDeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + # NOTE: As you can see, finalizing this deprecation breaks some (very) old + # pickle files. This may be fine, but sneeds to be done with some care since + # it breaks all of them and not just some. 
+ # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) + message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " + + def test_deprecated(self): + # in particular integers should be rejected because one may think they mean + # alignment, or pass them accidentally as a subarray shape (meaning to pass + # a tuple). + self.assert_deprecated(lambda: np.dtype("f8", align=3)) + + @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) + def test_not_deprecated(self, align): + # if the user passes a bool, it is accepted. + self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 0faf35c64b98..4ee021d18f6f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4505,18 +4505,24 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_int8(self): s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." 
a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_float32(self): s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01 Date: Tue, 1 Jul 2025 17:34:13 +0200 Subject: [PATCH 165/294] MAINT: Enable linting with ruff E501 (#29300) * MAINT: Enforce ruff E501 * fix merge --- benchmarks/benchmarks/bench_linalg.py | 13 ++++--- numpy/_core/tests/test_simd.py | 12 ++++--- numpy/_core/tests/test_ufunc.py | 13 ++++--- numpy/_core/tests/test_umath_accuracy.py | 12 +++++-- numpy/_core/tests/test_umath_complex.py | 44 +++++++++++++----------- numpy/tests/test_configtool.py | 12 ++++--- ruff.toml | 9 ++--- 7 files changed, 68 insertions(+), 47 deletions(-) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 03e2fd77f4f2..49a7ae84fde6 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -148,7 +148,9 @@ def setup(self, dtype): self.non_contiguous_dim1_small = np.arange(1, 80, 2, dtype=dtype) self.non_contiguous_dim1 = np.arange(1, 4000, 2, dtype=dtype) self.non_contiguous_dim2 = np.arange(1, 2400, 2, dtype=dtype).reshape(30, 40) - self.non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype).reshape(20, 30, 40) + + non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype) + self.non_contiguous_dim3 = non_contiguous_dim3.reshape(20, 30, 40) # outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two def time_einsum_outer(self, dtype): @@ -180,11 +182,13 @@ def time_einsum_contig_outstride0(self, dtype): # outer(a,b): non_contiguous arrays def time_einsum_noncon_outer(self, dtype): - np.einsum("i,j", self.non_contiguous_dim1, self.non_contiguous_dim1, optimize=True) + np.einsum("i,j", self.non_contiguous_dim1, + 
self.non_contiguous_dim1, optimize=True) # multiply(a, b):non_contiguous arrays def time_einsum_noncon_multiply(self, dtype): - np.einsum("..., ...", self.non_contiguous_dim2, self.non_contiguous_dim3, optimize=True) + np.einsum("..., ...", self.non_contiguous_dim2, + self.non_contiguous_dim3, optimize=True) # sum and multiply:non_contiguous arrays def time_einsum_noncon_sum_mul(self, dtype): @@ -200,7 +204,8 @@ def time_einsum_noncon_mul(self, dtype): # contig_contig_outstride0_two: non_contiguous arrays def time_einsum_noncon_contig_contig(self, dtype): - np.einsum("ji,i->", self.non_contiguous_dim2, self.non_contiguous_dim1_small, optimize=True) + np.einsum("ji,i->", self.non_contiguous_dim2, + self.non_contiguous_dim1_small, optimize=True) # sum_of_products_contig_outstride0_one: non_contiguous arrays def time_einsum_noncon_contig_outstride0(self, dtype): diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index acea4315e679..335abc98c84e 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -271,7 +271,8 @@ def test_operators_shift(self): shr = self.shr(vdata_a, count) assert shr == data_shr_a - # shift by zero or max or out-range immediate constant is not applicable and illogical + # shift by zero or max or out-range immediate constant is not + # applicable and illogical for count in range(1, self._scalar_size()): # load to cast data_shl_a = self.load([a << count for a in data_a]) @@ -419,7 +420,8 @@ def test_sqrt(self): sqrt = self.sqrt(self.setall(case)) assert sqrt == pytest.approx(data_sqrt, nan_ok=True) - data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + # load to truncate precision + data_sqrt = self.load([math.sqrt(x) for x in data]) sqrt = self.sqrt(vdata) assert sqrt == data_sqrt @@ -1334,8 +1336,10 @@ def test_mask_conditional(self): for sfx in sfxes: skip_m = skip_sfx.get(sfx, skip) inhr = (cls,) - attr = {"npyv": targets[target_name], "sfx": sfx, "target_name": 
target_name} - tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) + attr = {"npyv": targets[target_name], "sfx": sfx, + "target_name": target_name} + type_name = f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}" + tcls = type(type_name, inhr, attr) if skip_m: pytest.mark.skip(reason=skip_m)(tcls) globals()[tcls.__name__] = tcls diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index a1cd63aec523..836be1245399 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1100,17 +1100,15 @@ def test_output_ellipsis_errors(self): match=r"out=\.\.\. is only allowed as a keyword argument."): np.add.reduce(1, (), None, ...) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + type_error = r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple" + with pytest.raises(TypeError, match=type_error): np.negative(1, out=(...,)) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + with pytest.raises(TypeError, match=type_error): # We only allow out=... 
not individual args for now np.divmod(1, 2, out=(np.empty(()), ...)) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + with pytest.raises(TypeError, match=type_error): np.add.reduce(1, out=(...,)) def test_axes_argument(self): @@ -1556,7 +1554,8 @@ def __eq__(self, other): arr1d = np.array([HasComparisons()]) assert_equal(arr1d == arr1d, np.array([True])) - assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d), np.array([True])) assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) def test_object_array_reduction(self): diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index da9419d63a8a..3ca2f508672e 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -68,8 +68,16 @@ def test_validate_transcendentals(self): npfunc = getattr(np, npname) for datatype in np.unique(data['type']): data_subset = data[data['type'] == datatype] - inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) - outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + data_input_str = data_subset['input'].astype(str) + data_output_str = data_subset['output'].astype(str) + data_type_str = data_subset['type'].astype(str) + + inval = np.array(str_to_float(data_input_str, + data_type_str), + dtype=eval(datatype)) + outval = np.array(str_to_float(data_output_str, + data_type_str), + dtype=eval(datatype)) perm = np.random.permutation(len(inval)) inval = inval[perm] outval = outval[perm] diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index 8f6f5c682a91..7012e7e357fe 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ 
b/numpy/_core/tests/test_umath_complex.py @@ -562,31 +562,35 @@ class TestSpecialComplexAVX: @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) def test_array(self, stride, astype): - arr = np.array([complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(np.inf, np.nan), - complex(np.inf, np.inf), - complex(0., np.inf), - complex(np.inf, 0.), - complex(0., 0.), - complex(0., np.nan), - complex(np.nan, 0.)], dtype=astype) - abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) - sq_true = np.array([complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(-np.inf, np.nan), - complex(np.inf, np.nan), - complex(0., 0.), - complex(np.nan, np.nan), - complex(np.nan, np.nan)], dtype=astype) + nan = np.nan + inf = np.inf + arr = np.array([complex(nan, nan), + complex(nan, inf), + complex(inf, nan), + complex(inf, inf), + complex(0., inf), + complex(inf, 0.), + complex(0., 0.), + complex(0., nan), + complex(nan, 0.)], dtype=astype) + abs_true = np.array([nan, inf, inf, inf, inf, inf, 0., nan, nan], + dtype=arr.real.dtype) + sq_true = np.array([complex(nan, nan), + complex(nan, nan), + complex(nan, nan), + complex(nan, inf), + complex(-inf, nan), + complex(inf, nan), + complex(0., 0.), + complex(nan, nan), + complex(nan, nan)], dtype=astype) with np.errstate(invalid='ignore'): assert_equal(np.abs(arr[::stride]), abs_true[::stride]) assert_equal(np.square(arr[::stride]), sq_true[::stride]) class TestComplexAbsoluteAVX: - @pytest.mark.parametrize("arraysize", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("arraysize", + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) # test to ensure masking and strides work 
as intended in the AVX implementation diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index e0b9bb1b7aff..a9c23b5cc007 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -15,8 +15,10 @@ PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig' -@pytest.mark.skipif(not IS_INSTALLED, reason="`numpy-config` not expected to be installed") -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") +@pytest.mark.skipif(not IS_INSTALLED, + reason="`numpy-config` not expected to be installed") +@pytest.mark.skipif(IS_WASM, + reason="wasm interpreter cannot start subprocess") class TestNumpyConfig: def check_numpyconfig(self, arg): p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) @@ -36,13 +38,15 @@ def test_configtool_pkgconfigdir(self): assert pathlib.Path(stdout) == PKG_CONFIG_DIR -@pytest.mark.skipif(not IS_INSTALLED, reason="numpy must be installed to check its entrypoints") +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy must be installed to check its entrypoints") def test_pkg_config_entrypoint(): (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy') assert entrypoint.value == numpy._core.lib.pkgconfig.__name__ -@pytest.mark.skipif(not IS_INSTALLED, reason="numpy.pc is only available when numpy is installed") +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy.pc is only available when numpy is installed") @pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc") def test_pkg_config_config_exists(): assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file() diff --git a/ruff.toml b/ruff.toml index 9916097420b6..ed6026ed0665 100644 --- a/ruff.toml +++ b/ruff.toml @@ -72,7 +72,6 @@ ignore = [ "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] 
"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] @@ -81,17 +80,15 @@ ignore = [ "numpy/_core/tests/test_einsum.py" = ["E501"] "numpy/_core/tests/test_multiarray.py" = ["E501"] "numpy/_core/tests/test_nditer*py" = ["E501"] -"numpy/_core/tests/test_ufunc*py" = ["E501"] -"numpy/_core/tests/test_umath*py" = ["E501"] -"numpy/_core/tests/test_numeric*.py" = ["E501"] +"numpy/_core/tests/test_umath.py" = ["E501"] +"numpy/_core/tests/test_numeric.py" = ["E501"] +"numpy/_core/tests/test_numerictypes.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_simd.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] -"numpy/tests/test_configtool.py" = ["E501"] "numpy/f2py/*py" = ["E501"] # for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length "numpy/_typing/_array_like.py" = ["E501"] From 3727d5b23e33c3c495b8025ad8c7413d72c9db0d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 1 Jul 2025 18:39:39 +0100 Subject: [PATCH 166/294] TYP: Add shape typing to return values of `np.nonzero` and `ndarray.nonzero`, simplify `MaskedArray.nonzero` return type (#29303) --- numpy/__init__.pyi | 2 +- numpy/_core/fromnumeric.pyi | 2 +- numpy/ma/core.pyi | 2 +- numpy/typing/tests/data/reveal/fromnumeric.pyi | 8 ++++---- numpy/typing/tests/data/reveal/ma.pyi | 2 +- numpy/typing/tests/data/reveal/ndarray_misc.pyi | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d12e63a8bed9..d071f3e6b92d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2355,7 +2355,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, 
_DTypeT_co]): def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... # `nonzero()` is deprecated for 0d arrays/generics - def nonzero(self) -> tuple[NDArray[intp], ...]: ... + def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... # `put` is technically available to `generic`, # but is pointless as `generic`s are immutable diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 2aedc727e6dc..ba6669936571 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -576,7 +576,7 @@ def ravel( @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... -def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ... +def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], ...]: ... # this prevents `Any` from being returned with Pyright @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index caea284bb566..3ca5d9be0b97 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1102,7 +1102,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... + def nonzero(self) -> tuple[_Array1D[intp], ...]: ... def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... def dot(self, b, out=..., strict=...): ... def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... 
diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 5438e001a13f..62bc926c765b 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -131,10 +131,10 @@ assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index a101ba0d48b0..48311a7e87fa 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -380,7 +380,7 @@ assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.d assert_type(MAR_b.T, MaskedArray[np.bool]) assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) -assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) # Masked Array addition diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 
4cbb90621ca9..7f0a214f8f52 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -170,7 +170,7 @@ assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) assert_type(AR_f8.dot(1, out=B), SubClass) -assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) +assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(AR_f8.searchsorted(1), np.intp) assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) From ec34c6d3f6ff7969bdd82f9b2347070d11d8b7de Mon Sep 17 00:00:00 2001 From: ianlv <168640168+ianlv@users.noreply.github.com> Date: Wed, 2 Jul 2025 16:38:32 +0800 Subject: [PATCH 167/294] chore: remove redundant words in comment (#29306) --- doc/source/reference/c-api/array.rst | 2 +- numpy/_core/src/common/dlpack/dlpack.h | 2 +- numpy/_core/src/multiarray/alloc.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 02db78ebb2b1..dc043b77f187 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -3814,7 +3814,7 @@ In this case, the helper C files typically do not have a canonical place where ``PyArray_ImportNumPyAPI`` should be called (although it is OK and fast to call it often). -To solve this, NumPy provides the following pattern that the the main +To solve this, NumPy provides the following pattern that the main file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include: .. 
code-block:: c diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index 19ecc27761f8..4dc164fe9c1b 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -270,7 +270,7 @@ typedef struct DLManagedTensor { void (*deleter)(struct DLManagedTensor * self); } DLManagedTensor; -// bit masks used in in the DLManagedTensorVersioned +// bit masks used in the DLManagedTensorVersioned /*! \brief bit mask to indicate that the tensor is read only. */ #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index f5600c99aaa5..8cd763f971ed 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -75,7 +75,7 @@ _npy_init_workspace( /* * Helper definition macro for a small work/scratchspace. - * The `NAME` is the C array to to be defined of with the type `TYPE`. + * The `NAME` is the C array to be defined of with the type `TYPE`. * * The usage pattern for this is: * From 888c4ff4be6adbada62bbb4a35523c05f9cff663 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 10:00:14 +0100 Subject: [PATCH 168/294] TYP: Add type annotations for `MaskedArray.__{pow,rpow}__` (#29277) --- numpy/__init__.pyi | 2 + numpy/ma/core.pyi | 69 +++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 84 +++++++++++++++++++++++++++ 3 files changed, 153 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d071f3e6b92d..6f52d1e37ce6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3230,6 +3230,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ # Keep in sync with `MaskedArray.__pow__` @overload def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3263,6 +3264,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + # Keep in sync with `MaskedArray.__rpow__` @overload def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 3ca5d9be0b97..5d29577b308b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -866,8 +866,73 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __pow__(self, other, mod: None = None, /): ... - def __rpow__(self, other, mod: None = None, /): ... + # Keep in sync with `ndarray.__pow__` + @overload + def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... 
+ @overload + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + @overload + def __pow__( + self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + @overload + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + @overload + def __pow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + # Keep in sync with `ndarray.__rpow__` + @overload + def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__( + self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + @overload + def __rpow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... 
# Keep in sync with `ndarray.__iadd__` @overload diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 48311a7e87fa..f2e9b136259a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -884,3 +884,87 @@ assert_type(AR_LIKE_f // MAR_o, Any) assert_type(AR_LIKE_td64 // MAR_o, Any) assert_type(AR_LIKE_dt64 // MAR_o, Any) assert_type(AR_LIKE_o // MAR_o, Any) + +# Masked Array power + +assert_type(MAR_b ** AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_u ** MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i ** MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_b, Any) + +assert_type(MAR_u4 ** AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 ** AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_u4 , MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4 , MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4 , MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4 , MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4 , Any) + +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_f, 
MaskedArray[np.floating]) +assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 ** AR_LIKE_o, Any) +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) + +assert_type(AR_LIKE_u ** MAR_i8 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8 , MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8 , MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8 , Any) + +assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c ** MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_f8, Any) + +assert_type(MAR_c16 ** AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o ** MAR_c16, Any) + +assert_type(MAR_o ** AR_LIKE_b, Any) +assert_type(MAR_o ** AR_LIKE_u, Any) +assert_type(MAR_o ** 
AR_LIKE_i, Any) +assert_type(MAR_o ** AR_LIKE_f, Any) +assert_type(MAR_o ** AR_LIKE_c, Any) +assert_type(MAR_o ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_o, Any) +assert_type(AR_LIKE_u ** MAR_o, Any) +assert_type(AR_LIKE_i ** MAR_o, Any) +assert_type(AR_LIKE_f ** MAR_o, Any) +assert_type(AR_LIKE_c ** MAR_o, Any) +assert_type(AR_LIKE_o ** MAR_o, Any) From 34d12a963351a0ec1a58e42f37592e49d4f7eed7 Mon Sep 17 00:00:00 2001 From: Shyok Mutsuddi Date: Wed, 2 Jul 2025 11:05:30 +0000 Subject: [PATCH 169/294] test: add regression test for grammar in ufunc TypeError message --- numpy/_core/tests/test_umath.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index c54b2ac86bc2..b5145e9d642b 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4917,3 +4917,12 @@ def test_ufunc_arg(self): @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") def test_string_arg(self): assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) + +class TestHypotErrorMessages: + def test_hypot_error_message_single_arg(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): + np.hypot(5) + + def test_hypot_error_message_multiple_args(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 4 were given"): + np.hypot(1, 2, 3, 4) From 99f894a76ebf91638d05c8611d4e0bc6f1ec9b78 Mon Sep 17 00:00:00 2001 From: Shyok Mutsuddi Date: Wed, 2 Jul 2025 11:06:34 +0000 Subject: [PATCH 170/294] fix: correct singular/plural grammar in ufunc TypeError message --- numpy/_core/src/common/npy_argparse.c | 10 ++++++---- numpy/_core/src/umath/ufunc_object.c | 7 ++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 6766b17043ac..aa011be9c585 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ 
-243,16 +243,18 @@ static int raise_incorrect_number_of_positional_args(const char *funcname, const _NpyArgParserCache *cache, Py_ssize_t len_args) { + const char *verb = (len_args == 1) ? "was" : "were"; if (cache->npositional == cache->nrequired) { PyErr_Format(PyExc_TypeError, - "%s() takes %d positional arguments but %zd were given", - funcname, cache->npositional, len_args); + "%s() takes %d positional arguments but %zd %s given", + funcname, cache->npositional, len_args, verb); } else { PyErr_Format(PyExc_TypeError, "%s() takes from %d to %d positional arguments but " - "%zd were given", - funcname, cache->nrequired, cache->npositional, len_args); + "%zd %s given", + funcname, cache->nrequired, cache->npositional, + len_args, verb); } return -1; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 4cdde8d3d77d..485364af1ff2 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4319,10 +4319,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Check number of arguments */ if (NPY_UNLIKELY((len_args < nin) || (len_args > nop))) { + const char *verb = (len_args == 1) ? 
"was" : "were"; PyErr_Format(PyExc_TypeError, - "%s() takes from %d to %d positional arguments but " - "%zd were given", - ufunc_get_name_cstr(ufunc) , nin, nop, len_args); + "%s() takes from %d to %d positional arguments but " + "%zd %s given", + ufunc_get_name_cstr(ufunc), nin, nop, len_args, verb); goto fail; } From a04953939d91490103d308491479217ef4a9802b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 2 Jul 2025 13:10:01 +0200 Subject: [PATCH 171/294] Update numpy/_core/tests/test_deprecations.py Co-authored-by: Matti Picus --- numpy/_core/tests/test_deprecations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index df27a1f9d076..b83a2ac610ee 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -458,7 +458,7 @@ def test_deprecated(self): class TestDTypeAlignBool(_VisibleDeprecationTestCase): # Deprecated in Numpy 2.4, 2025-07 # NOTE: As you can see, finalizing this deprecation breaks some (very) old - # pickle files. This may be fine, but sneeds to be done with some care since + # pickle files. This may be fine, but needs to be done with some care since # it breaks all of them and not just some. # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) 
message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " From 0578bdfe74c914b602ba17fef833dd5c5b2ac546 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 15:19:28 +0100 Subject: [PATCH 172/294] TYP: Type ``MaskedArray.{trace,round,cumsum,cumprod}`` (#29307) --- numpy/__init__.pyi | 4 ++ numpy/ma/core.pyi | 63 +++++++++++++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 12 +++++ 3 files changed, 75 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6f52d1e37ce6..6bcbe5b68662 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1741,6 +1741,7 @@ class _ArrayOrScalarCommon: @overload def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + # Keep in sync with `MaskedArray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @overload # out=ndarray @@ -1776,6 +1777,7 @@ class _ArrayOrScalarCommon: @overload def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `MaskedArray.cumprod` @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray @@ -1783,6 +1785,7 @@ class _ArrayOrScalarCommon: @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `MaskedArray.cumsum` @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... 
@overload # out: ndarray @@ -2385,6 +2388,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): stable: bool | None = ..., ) -> None: ... + # Keep in sync with `MaskedArray.trace` @overload def trace( self, # >= 2D array diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 5d29577b308b..3fad41a1434e 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -46,6 +46,7 @@ from numpy import ( from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _32Bit, _64Bit, @@ -1168,18 +1169,72 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): ) -> _ArrayT: ... def nonzero(self) -> tuple[_Array1D[intp], ...]: ... - def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.trace` + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, + ) -> _ArrayT: ... + def dot(self, b, out=..., strict=...): ... def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... - def cumsum(self, axis=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... 
+ @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... product: Any - def cumprod(self, axis=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... def anom(self, axis=..., dtype=...): ... def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def round(self, decimals=..., out=...): ... + + # Keep in sync with `ndarray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... 
# Keep in-sync with np.ma.argmin diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index f2e9b136259a..9e8ab98ec4ce 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -383,6 +383,18 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) +assert_type(MAR_f8.trace(), Any) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.round(), MaskedArray[np.float64]) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) From 1cc6113ca186b24d08cb7be94e2834bec4768c07 Mon Sep 17 00:00:00 2001 From: Sachin Shah <39803835+inventshah@users.noreply.github.com> Date: Wed, 2 Jul 2025 11:25:35 -0400 Subject: [PATCH 173/294] TYP: add explicit types for np.quantile (#29305) --- numpy/lib/_function_base_impl.pyi | 161 +++++++++++++++++++++++++++++- 1 file changed, 158 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 78947b1b3b46..14e48f1cc3fd 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -615,6 +615,7 @@ _MethodKind = L[ "nearest", ] +# NOTE: keep in sync with `quantile` @overload def percentile( a: _ArrayLikeFloat_co, @@ -772,9 +773,163 @@ def percentile( weights: _ArrayLikeFloat_co | None = ..., ) -> _ArrayT: ... 
-# NOTE: Not an alias, but they do have identical signatures -# (that we can reuse) -quantile = percentile +# NOTE: keep in sync with `percentile` +@overload +def quantile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> floating: ... +@overload +def quantile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> complexfloating: ... +@overload +def quantile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> timedelta64: ... +@overload +def quantile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> datetime64: ... +@overload +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> Any: ... +@overload +def quantile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[floating]: ... 
+@overload +def quantile( + a: _ArrayLikeComplex_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def quantile( + a: _ArrayLikeTD64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[timedelta64]: ... +@overload +def quantile( + a: _ArrayLikeDT64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[datetime64]: ... +@overload +def quantile( + a: _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[object_]: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> Any: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> _ArrayT: ... 
+@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + weights: _ArrayLikeFloat_co | None = ..., +) -> _ArrayT: ... _ScalarT_fm = TypeVar( "_ScalarT_fm", From 75e86ba27ae9f99ee67660204d7ca1e16cb323da Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 18:38:59 +0100 Subject: [PATCH 174/294] DOCS: Fix rendering of ``MaskedArray.anom`` ``dtype`` (#29311) --- numpy/ma/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 2860bec848aa..e7d8a20f6c6c 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5454,8 +5454,8 @@ def anom(self, axis=None, dtype=None): The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. + the default is float32; for arrays of float types it is the same as + the array type. 
See Also -------- From 84234016c4676ffe0215f300e5b3f24df62f8efd Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 2 Jul 2025 22:50:59 +0300 Subject: [PATCH 175/294] BLD: remove unused github workflow (#29312) --- .github/workflows/windows_arm64.yml | 208 ---------------------------- 1 file changed, 208 deletions(-) delete mode 100644 .github/workflows/windows_arm64.yml diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml deleted file mode 100644 index 3eaf02eb062c..000000000000 --- a/.github/workflows/windows_arm64.yml +++ /dev/null @@ -1,208 +0,0 @@ -name: Windows Arm64 - -on: - workflow_dispatch: - -env: - python_version: 3.12 - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - windows_arm: - runs-on: windows-2022 - - # To enable this job on a fork, comment out: - if: github.repository == 'numpy/numpy' - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: ${{env.python_version}} - architecture: x64 - - - name: Install build dependencies from PyPI - run: | - python -m pip install -r requirements/build_requirements.txt - - - name: Prepare python - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Detecting python location and version - $PythonDir = (Split-Path -Parent (get-command python).Path) - $PythonVersionParts = ( -split (python -V)) - $PythonVersion = $PythonVersionParts[1] - - #Downloading the package for appropriate python version from nuget - $PythonARM64NugetLink = "https://www.nuget.org/api/v2/package/pythonarm64/$PythonVersion" - $PythonARM64NugetZip = "nuget_python.zip" - $PythonARM64NugetDir = 
"temp_nuget" - Invoke-WebRequest $PythonARM64NugetLink -OutFile $PythonARM64NugetZip - - #Changing the libs folder to enable python libraries to be linked for arm64 - Expand-Archive $PythonARM64NugetZip $PythonARM64NugetDir - Copy-Item $PythonARM64NugetDir\tools\libs\* $PythonDir\libs - Remove-Item -Force -Recurse $PythonARM64NugetDir - Remove-Item -Force $PythonARM64NugetZip - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Prepare Licence - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - $CurrentDir = (get-location).Path - $LicenseFile = "$CurrentDir\LICENSE.txt" - Set-Content $LicenseFile ([Environment]::NewLine) - Add-Content $LicenseFile "----" - Add-Content $LicenseFile ([Environment]::NewLine) - Add-Content $LicenseFile (Get-Content "$CurrentDir\LICENSES_bundled.txt") - Add-Content $LicenseFile (Get-Content "$CurrentDir\tools\wheels\LICENSE_win32.txt") - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Wheel build - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Creating cross compile script for messon subsystem - $CurrentDir = (get-location) - $CrossScript = "$CurrentDir\arm64_w64.txt" - $CrossScriptContent = - { - [host_machine] - system = 'windows' - subsystem = 'windows' - kernel = 'nt' - cpu_family = 'aarch64' - cpu = 'aarch64' - endian = 'little' - - [binaries] - c='cl.exe' - cpp = 'cl.exe' - - [properties] - sizeof_short = 2 - sizeof_int = 4 - sizeof_long = 4 - sizeof_long_long = 8 - sizeof_float = 4 - sizeof_double = 8 - sizeof_long_double = 8 - sizeof_size_t = 8 - sizeof_wchar_t = 2 - sizeof_off_t = 4 - sizeof_Py_intptr_t = 8 - sizeof_PY_LONG_LONG = 8 - longdouble_format = 'IEEE_DOUBLE_LE' - } - Set-Content $CrossScript $CrossScriptContent.ToString() - - #Setting up cross compilers from MSVC - $Products = 'Community', 'Professional', 'Enterprise', 'BuildTools' | % { "Microsoft.VisualStudio.Product.$_" } - $VsInstallPath = (vswhere 
-products $Products -latest -format json | ConvertFrom-Json).installationPath - $VSVars = (Get-ChildItem -Path $VsInstallPath -Recurse -Filter "vcvarsamd64_arm64.bat").FullName - $ScriptingObj = New-Object -ComObject Scripting.FileSystemObject - $VSVarsShort = $ScriptingObj.GetFile($VSVars).ShortPath - cmd /c "$VSVarsShort && set" | - ForEach-Object { - if ($_ -match "=") { - $Var = $_.split("=") - set-item -force -path "ENV:\$($Var[0])" -value "$($Var[1])" - } - } - - #Building the wheel - pip wheel . --config-settings=setup-args="--cross-file=$CrossScript" - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Fix wheel - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Finding whl file - $CurrentDir = (get-location) - $WhlName = ((Get-ChildItem -Filter "*.whl").FullName) - $ZipWhlName = "$CurrentDir\ZipWhlName.zip" - $UnzippedWhl = "$CurrentDir\unzipedWhl" - - #Expanding whl file - Rename-Item -Path $WhlName $ZipWhlName - if (Test-Path $UnzippedWhl) { - Remove-Item -Force -Recurse $UnzippedWhl - } - Expand-Archive -Force -Path $ZipWhlName $UnzippedWhl - - #Renaming all files to show that their arch is arm64 - Get-ChildItem -Recurse -Path $UnzippedWhl *win_amd64* | Rename-Item -NewName { $_.Name -replace 'win_amd64', 'win_arm64' } - $DIST_DIR = (Get-ChildItem -Recurse -Path $UnzippedWhl *dist-info).FullName - - #Changing amd64 references from metafiles - (GET-Content $DIST_DIR/RECORD) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/RECORD - (GET-Content $DIST_DIR/WHEEL) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/WHEEL - - #Packing whl file - Compress-Archive -Path $UnzippedWhl\* -DestinationPath $ZipWhlName -Force - $WhlName = $WhlName.Replace("win_amd64", "win_arm64") - Rename-Item -Path $ZipWhlName $WhlName - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Upload Artifacts - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: ${{ env.python_version }}-win_arm64 - path: ./*.whl - - - name: Setup Mamba - uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - # - name: Upload wheels - # if: success() - # shell: bash -el {0} - # # see https://github.com/marketplace/actions/setup-miniconda for why - # # `-el {0}` is required. - # env: - # NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - # run: | - # source tools/wheels/upload_wheels.sh - # set_upload_vars - # # trigger an upload to - # # https://anaconda.org/scientific-python-nightly-wheels/numpy - # # for cron jobs or "Run workflow" (restricted to main branch). 
- # # Tags will upload to - # # https://anaconda.org/multibuild-wheels-staging/numpy - # # The tokens were originally generated at anaconda.org - # upload_wheels - From ec8edceb1a8b94883a1e6e474d615d9be0b6b9e7 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 3 Jul 2025 11:47:22 +0100 Subject: [PATCH 176/294] TYP: rename `_T` to `_ScalarT` in `matlib.pyi` for consistency (#29310) --- numpy/matlib.pyi | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index baeadc078028..93bee1975df3 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -507,8 +507,8 @@ __all__ += np.__all__ ### -_T = TypeVar("_T", bound=np.generic) -_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] _Order: TypeAlias = Literal["C", "F"] ### @@ -517,7 +517,7 @@ _Order: TypeAlias = Literal["C", "F"] @overload def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -525,7 +525,7 @@ def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... 
@overload def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -533,7 +533,7 @@ def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C" @overload def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -541,7 +541,7 @@ def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... @overload -def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... @overload def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... @@ -555,9 +555,9 @@ def eye( order: _Order = "C", ) -> _Matrix[np.float64]: ... @overload -def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload -def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... @@ -575,8 +575,8 @@ def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... # @overload -def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... 
+def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... @overload -def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... @overload def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... From 85708b3c0bd3d2fdbcb00cb08d0a7b323ce3af2d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 3 Jul 2025 12:05:06 +0100 Subject: [PATCH 177/294] TYP: Type ``MaskedArray.dot`` and ``MaskedArray.anom`` (#29309) --- numpy/ma/core.pyi | 16 ++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 9 +++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 3fad41a1434e..ca9826c7d083 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1200,7 +1200,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): out: _ArrayT, ) -> _ArrayT: ... - def dot(self, b, out=..., strict=...): ... + # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. + @overload + def dot(self, b: ArrayLike, out: None = ..., strict: bool = ...) -> _MaskedArray[Any]: ... + @overload + def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = ...) -> _ArrayT: ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... # Keep in sync with `ndarray.cumsum` @@ -1223,7 +1228,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... - def anom(self, axis=..., dtype=...): ... + + @overload + def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... + @overload + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... 
+ @overload + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 9e8ab98ec4ce..c26dca0ed9ec 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -373,6 +373,11 @@ assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) assert_type(MAR_i8.hardmask, bool) assert_type(MAR_i8.sharedmask, bool) +assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, 'float16'), np.ma.MaskedArray[tuple[int, int], np.dtype]) + assert_type(MAR_b.transpose(), MaskedArray[np.bool]) assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) @@ -380,6 +385,10 @@ assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.d assert_type(MAR_b.T, MaskedArray[np.bool]) assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclass) + assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) From 1dac511d00980b2a30e7fff23312bf84922dc0c2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 3 Jul 2025 15:37:03 -0600 Subject: [PATCH 178/294] MAINT: remove out-of-date comment --- numpy/_core/src/multiarray/mapping.c | 2 -- 
1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 2ce7dcdb234a..7483448e632b 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2110,8 +2110,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) /* May need a generic copy function (only for refs and odd sizes) */ NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - // TODO: the heuristic used here to determine the src_dtype might be subtly wrong - // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. if (PyArray_GetDTypeTransferFunction( 1, itemsize, itemsize, descr, PyArray_DESCR(self), From 2273cd8d1e8be9a9ec237f6115cf59d4c071ebd2 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Fri, 4 Jul 2025 13:14:00 -0700 Subject: [PATCH 179/294] DOC: Fix spelling (#29320) --- doc/source/reference/arrays.classes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 8a2e804eb36b..80821c2c08fa 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -32,7 +32,7 @@ Note that :func:`asarray` always returns the base-class ndarray. If you are confident that your use of the array object can handle any subclass of an ndarray, then :func:`asanyarray` can be used to allow subclasses to propagate more cleanly through your subroutine. In -principal a subclass could redefine any aspect of the array and +principle, a subclass could redefine any aspect of the array and therefore, under strict guidelines, :func:`asanyarray` would rarely be useful. 
However, most subclasses of the array object will not redefine certain aspects of the array object such as the buffer From c21df31bb298384430c0f99765a55926ff69e39f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 5 Jul 2025 16:14:09 -0600 Subject: [PATCH 180/294] MAINT: Rename nep-0049.rst. Rename to nep-0049-data-allocation-strategies.rst. Closes #29323. --- .../{nep-0049.rst => nep-0049-data-allocation-strategies.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/neps/{nep-0049.rst => nep-0049-data-allocation-strategies.rst} (100%) diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049-data-allocation-strategies.rst similarity index 100% rename from doc/neps/nep-0049.rst rename to doc/neps/nep-0049-data-allocation-strategies.rst From 2a445f43a7574ab1d80ac6a7981b62829a863064 Mon Sep 17 00:00:00 2001 From: Charlie Lin Date: Sun, 6 Jul 2025 20:32:45 -0400 Subject: [PATCH 181/294] BLD: update `highway` submodule to latest master Fixes #29130 --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 12b325bc1793..37c08e5528f6 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 12b325bc1793dee68ab2157995a690db859fe9e0 +Subproject commit 37c08e5528f63ead9c7e4fd99ba454c1b1a3e3f7 From 48bc85af1e8aae77f8325de0babb8bbb24cb2fab Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Mon, 7 Jul 2025 00:26:33 -0700 Subject: [PATCH 182/294] DOC: Clarify assert_allclose differences vs. 
allclose (#29325) Resolves #9988 --- numpy/testing/_private/utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 5f8dfc209283..2d45fa30b8a6 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1625,9 +1625,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. - The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note - that ``allclose`` has different default values). It compares the difference - between `actual` and `desired` to ``atol + rtol * abs(desired)``. + The test is equivalent to ``allclose(actual, desired, rtol, atol)``, + except that it is stricter: it doesn't broadcast its operands, and has + tighter default tolerance values. It compares the difference between + `actual` and `desired` to ``atol + rtol * abs(desired)``. Parameters ---------- From 6a6b171ef8123c236f7afc59ff9ce3e011895ed9 Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Mon, 7 Jul 2025 03:29:31 -0400 Subject: [PATCH 183/294] TST: Add test for non-npy files in npz and different names (#29313) * TST: Add test for non-npy files in npz and different names Retrieving `a` from an npz file will first check for a file `a`, then for `a.npy`; this checks both. * BUG: Argument to read should be int not str Old behavior is to read everything, which will happen if I pass no argument. 
--------- Co-authored-by: Sebastian Berg --- numpy/lib/_npyio_impl.py | 2 +- numpy/lib/tests/test_io.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 36ead97a1aae..e9a8509fc685 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -261,7 +261,7 @@ def __getitem__(self, key): max_header_size=self.max_header_size ) else: - return bytes.read(key) + return bytes.read() def __contains__(self, key): return (key in self._files) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 48f579d7ddc7..05ef0be92211 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -7,6 +7,7 @@ import threading import time import warnings +import zipfile from ctypes import c_bool from datetime import datetime from io import BytesIO, StringIO @@ -217,6 +218,22 @@ def roundtrip(self, *args, **kwargs): self.arr_reloaded.fid.close() os.remove(self.arr_reloaded.fid.name) + def test_load_non_npy(self): + """Test loading non-.npy files and name mapping in .npz.""" + with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp: + with zipfile.ZipFile(tmp, "w") as npz: + with npz.open("test1.npy", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("test2", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("metadata", "w") as out_file: + out_file.write(b"Name: Test") + with np.load(tmp) as npz: + assert len(npz["test1"]) == 10 + assert len(npz["test1.npy"]) == 10 + assert len(npz["test2"]) == 10 + assert npz["metadata"] == b"Name: Test" + @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow From d844b79201d966ae3668f12386d0a585ea766630 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 7 Jul 2025 12:42:13 +0100 Subject: [PATCH 184/294] TYP: `svd` overload 
incorrectly noted `Literal[False]` to be the default for `compute_uv` (#29331) --- numpy/linalg/_linalg.pyi | 20 ++++++++++++++++++-- numpy/typing/tests/data/reveal/linalg.pyi | 2 ++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 68bd6d933921..e9b4cc0eb0e4 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -277,14 +277,30 @@ def svd( def svd( a: _ArrayLikeInt_co, full_matrices: bool = ..., - compute_uv: L[False] = ..., + *, + compute_uv: L[False], + hermitian: bool = ..., +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool, + compute_uv: L[False], hermitian: bool = ..., ) -> NDArray[float64]: ... @overload def svd( a: _ArrayLikeComplex_co, full_matrices: bool = ..., - compute_uv: L[False] = ..., + *, + compute_uv: L[False], + hermitian: bool = ..., +) -> NDArray[floating]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool, + compute_uv: L[False], hermitian: bool = ..., ) -> NDArray[floating]: ... 
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 417fb0d8c558..663fb888f012 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -74,8 +74,10 @@ assert_type(np.linalg.svd(AR_i8), SVDResult) assert_type(np.linalg.svd(AR_f8), SVDResult) assert_type(np.linalg.svd(AR_c16), SVDResult) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_i8, True, False), npt.NDArray[np.float64]) assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) assert_type(np.linalg.cond(AR_i8), Any) assert_type(np.linalg.cond(AR_f8), Any) From 85cb82c79c4923f1e3289f80498064f1b0c70f25 Mon Sep 17 00:00:00 2001 From: polaris-3 <214435818+polaris-3@users.noreply.github.com> Date: Mon, 7 Jul 2025 16:41:49 +0200 Subject: [PATCH 185/294] ENH: Improve error message in numpy.testing.assert_array_compare (#29112) Improve the error message in numpy.testing.assert_array_compare to show the position of differing elements. Currently, if there is an assertion error in functions like numpy.testing.assert_allclose, both arrays are printed. Typically the arrays are so large that they are truncated before being printed, making it hard to spot the differences. I suggest to print the indices of the differing elements as well as the corresponding values. To avoid excessive output, the number of printed out differences is limited to five. 
* MAINT: Different error messages for different numbers of mismatches --- .../upcoming_changes/29112.improvement.rst | 5 ++ numpy/testing/_private/utils.py | 33 ++++++++ numpy/testing/tests/test_utils.py | 83 +++++++++++++++++++ 3 files changed, 121 insertions(+) create mode 100644 doc/release/upcoming_changes/29112.improvement.rst diff --git a/doc/release/upcoming_changes/29112.improvement.rst b/doc/release/upcoming_changes/29112.improvement.rst new file mode 100644 index 000000000000..01baa668b9fe --- /dev/null +++ b/doc/release/upcoming_changes/29112.improvement.rst @@ -0,0 +1,5 @@ +Improved error message for `assert_array_compare` +------------------------------------------------- +The error message generated by `assert_array_compare` which is used by functions +like `assert_allclose`, `assert_array_less` etc. now also includes information +about the indices at which the assertion fails. \ No newline at end of file diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 2d45fa30b8a6..78f9e9a004b1 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -574,6 +574,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): Arrays are not almost equal to 9 decimals Mismatched elements: 1 / 2 (50%) + Mismatch at index: + [1]: 2.3333333333333 (ACTUAL), 2.33333334 (DESIRED) Max absolute difference among violations: 6.66669964e-09 Max relative difference among violations: 2.85715698e-09 ACTUAL: array([1. 
, 2.333333333]) @@ -875,6 +877,31 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): percent_mismatch = 100 * n_mismatch / n_elements remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' f'({percent_mismatch:.3g}%)'] + if invalids.ndim != 0: + if flagged.ndim > 0: + positions = np.argwhere(np.asarray(~flagged))[invalids] + else: + positions = np.argwhere(np.asarray(invalids)) + s = "\n".join( + [ + f" {p.tolist()}: {ox if ox.ndim == 0 else ox[tuple(p)]} " + f"({names[0]}), {oy if oy.ndim == 0 else oy[tuple(p)]} " + f"({names[1]})" + for p in positions[:5] + ] + ) + if len(positions) == 1: + remarks.append( + f"Mismatch at index:\n{s}" + ) + elif len(positions) <= 5: + remarks.append( + f"Mismatch at indices:\n{s}" + ) + else: + remarks.append( + f"First 5 mismatches are at indices:\n{s}" + ) with errstate(all='ignore'): # ignore errors for non-numeric types @@ -1013,6 +1040,8 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Arrays are not equal Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 3.141592653589793 (ACTUAL), 3.1415926535897927 (DESIRED) Max absolute difference among violations: 4.4408921e-16 Max relative difference among violations: 1.41357986e-16 ACTUAL: array([1. , 3.141593, nan]) @@ -1126,6 +1155,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', Arrays are not almost equal to 5 decimals Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 2.33333 (ACTUAL), 2.33339 (DESIRED) Max absolute difference among violations: 6.e-05 Max relative difference among violations: 2.57136612e-05 ACTUAL: array([1. , 2.33333, nan]) @@ -1247,6 +1278,8 @@ def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): Arrays are not strictly ordered `x < y` Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [0]: 1.0 (x), 1.0 (y) Max absolute difference among violations: 0. Max relative difference among violations: 0. 
x: array([ 1., 1., nan]) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index fcf20091ca8e..b09df821680b 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -265,6 +265,8 @@ def test_array_vs_array_not_equal(self): b = np.array([34986, 545676, 439655, 0]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 563766 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 563766\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -272,6 +274,9 @@ def test_array_vs_array_not_equal(self): a = np.array([34986, 545676, 439655.2, 563766]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 439655.2 (ACTUAL), 439655 (DESIRED)\n' + ' [3]: 563766.0 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '563766.\n' 'Max relative difference among violations: ' @@ -466,6 +471,8 @@ def test_closeness(self): self._assert_func([1.499999], [0.0], decimal=0) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1.5 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 1.5\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -474,12 +481,16 @@ def test_closeness(self): a = [1.4999999, 0.00003] b = [1.49999991, 0] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 3e-05 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(a, b, decimal=7) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 3e-05 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among 
violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -493,6 +504,8 @@ def test_simple(self): self._assert_func(x, y, decimal=4) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1234.2222 (ACTUAL), 1234.2223 (DESIRED)\n' 'Max absolute difference among violations: ' '1.e-04\n' 'Max relative difference among violations: ' @@ -504,6 +517,9 @@ def test_array_vs_scalar(self): a = [5498.42354, 849.54345, 0.00] b = 5498.42354 expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 849.54345 (ACTUAL), 5498.42354 (DESIRED)\n' + ' [2]: 0.0 (ACTUAL), 5498.42354 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 1.') @@ -511,6 +527,9 @@ def test_array_vs_scalar(self): self._assert_func(a, b, decimal=9) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 5498.42354 (ACTUAL), 849.54345 (DESIRED)\n' + ' [2]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 5.4722099') @@ -519,6 +538,8 @@ def test_array_vs_scalar(self): a = [5498.42354, 0.00] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -527,6 +548,8 @@ def test_array_vs_scalar(self): b = 0 expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [0]: 5498.42354 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -603,6 +626,8 @@ def all(self, *args, **kwargs): all(z) b = np.array([1., 202]).view(MyArray) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 2.0 (ACTUAL), 202.0 (DESIRED)\n' 'Max absolute 
difference among violations: 200.\n' 'Max relative difference among violations: 0.99009') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -693,6 +718,10 @@ def test_error_message(self): # Test with a different amount of decimal digits expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1.00000000001 (ACTUAL), 1.00000000002 (DESIRED)\n' + ' [1]: 2.00000000002 (ACTUAL), 2.00000000003 (DESIRED)\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -708,6 +737,8 @@ def test_error_message(self): # differs. Note that we only check for the formatting of the arrays # themselves. expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -720,6 +751,8 @@ def test_error_message(self): x = np.array([np.inf, 0]) y = np.array([np.inf, 1]) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.\n' ' ACTUAL: array([inf, 0.])\n' @@ -731,6 +764,9 @@ def test_error_message(self): x = np.array([1, 2]) y = np.array([0, 0]) expected_msg = ('Mismatched elements: 2 / 2 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1 (ACTUAL), 0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 2\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -742,6 +778,12 @@ def test_error_message_2(self): x = 2 y = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 1.0 
(DESIRED)\n' + ' [2]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [3]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [4]: 2 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -750,6 +792,12 @@ def test_error_message_2(self): y = 2 x = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [1]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [2]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [3]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [4]: 1.0 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 0.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -838,6 +886,9 @@ def test_simple_arrays(self): b = np.array([2, 4, 6, 8]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 6 (x), 6 (y)\n' + ' [3]: 20 (x), 8 (y)\n' 'Max absolute difference among violations: 12\n' 'Max relative difference among violations: 1.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -849,6 +900,11 @@ def test_rank2(self): self._assert_func(x, y) expected_msg = ('Mismatched elements: 4 / 4 (100%)\n' + 'Mismatch at indices:\n' + ' [0, 0]: 1.2 (x), 1.1 (y)\n' + ' [0, 1]: 2.3 (x), 2.2 (y)\n' + ' [1, 0]: 3.4 (x), 3.3 (y)\n' + ' [1, 1]: 4.5 (x), 4.4 (y)\n' 'Max absolute difference among violations: 0.1\n' 'Max relative difference among violations: 0.09090909') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -867,6 +923,8 @@ def test_rank3(self): y[0, 0, 0] = 0 expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n' + 'Mismatch at index:\n' + ' [0, 0, 0]: 1.0 (x), 0.0 (y)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -910,12 +968,20 @@ 
def test_simple_items_and_array(self): y = 999090.54 expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n' + 'Mismatch at index:\n' + ' [1, 1]: 999090.54 (x), 999090.54 (y)\n' 'Max absolute difference among violations: 0.\n' 'Max relative difference among violations: 0.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 12 / 12 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0, 0]: 999090.54 (x), 3.4536 (y)\n' + ' [0, 1]: 999090.54 (x), 2390.5436 (y)\n' + ' [0, 2]: 999090.54 (x), 435.54657 (y)\n' + ' [0, 3]: 999090.54 (x), 324525.4535 (y)\n' + ' [1, 0]: 999090.54 (x), 5449.54 (y)\n' 'Max absolute difference among violations: ' '999087.0864\n' 'Max relative difference among violations: ' @@ -928,12 +994,17 @@ def test_zeroes(self): y = np.array(87654.) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [0]: 546456.0 (x), 87654.0 (y)\n' 'Max absolute difference among violations: 458802.\n' 'Max relative difference among violations: 5.23423917') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 87654.0 (x), 0.0 (y)\n' + ' [2]: 87654.0 (x), 15.455 (y)\n' 'Max absolute difference among violations: 87654.\n' 'Max relative difference among violations: ' '5670.5626011') @@ -943,12 +1014,18 @@ def test_zeroes(self): y = 0 expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 546456.0 (x), 0 (y)\n' + ' [1]: 0.0 (x), 0 (y)\n' + ' [2]: 15.455 (x), 0 (y)\n' 'Max absolute difference among violations: 546456.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: 0 (x), 0.0 (y)\n' 'Max absolute difference among 
violations: 0.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1134,12 +1211,16 @@ def test_simple(self): b = np.array([x, y, x, x]) c = np.array([x, y, x, z]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.001 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): assert_allclose(b, c) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.0 (ACTUAL), 0.001 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1155,6 +1236,8 @@ def test_report_fail_percentage(self): b = np.array([1, 1, 1, 2]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 1 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1\n' 'Max relative difference among violations: 0.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): From a63bed470e4e0e2785b1299155e8d4a34e302282 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 7 Jul 2025 15:48:03 +0100 Subject: [PATCH 186/294] TYP: Allow passing `dtype=None` to `trace` (#29332) --- numpy/__init__.pyi | 6 +++--- numpy/_core/fromnumeric.pyi | 6 +++--- numpy/linalg/_linalg.pyi | 2 +- numpy/ma/core.pyi | 6 +++--- numpy/typing/tests/data/reveal/fromnumeric.pyi | 1 + numpy/typing/tests/data/reveal/ma.pyi | 1 + 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6bcbe5b68662..d8c57c87cbbe 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2395,7 +2395,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = 
..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... @overload @@ -2404,7 +2404,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... @@ -2414,7 +2414,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index ba6669936571..34849c2cc800 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -531,7 +531,7 @@ def trace( offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... @overload @@ -540,7 +540,7 @@ def trace( offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload @@ -549,7 +549,7 @@ def trace( offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index e9b4cc0eb0e4..51844817e2dc 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -440,7 +440,7 @@ def trace( /, *, offset: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., ) -> Any: ... 
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ca9826c7d083..63dea396de66 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1177,7 +1177,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... @overload @@ -1186,7 +1186,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... @@ -1196,7 +1196,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 62bc926c765b..a2ba83e6c4c8 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -124,6 +124,7 @@ assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.trace(AR_b), Any) assert_type(np.trace(AR_f4), Any) assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index c26dca0ed9ec..7c2cb9d5f05e 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -394,6 +394,7 @@ assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) assert_type(MAR_f8.trace(), Any) assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.trace(out=MAR_subclass, 
dtype=None), MaskedArraySubclass) assert_type(MAR_f8.round(), MaskedArray[np.float64]) assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) From fdf5f1b5f2d5268e4908cbe068891bcbbc3b47bc Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 7 Jul 2025 11:25:41 -0600 Subject: [PATCH 187/294] MAINT: remove internal uses of assert_warns and suppress_warnings (#29322) * MAINT: remove internal uses of assert_warns and suppress_warnings * MAINT: adjust check for ignore filters in warnings tests * MAINT: simplify filtering per review comments * MAINT: fix warnings test * MAINT: appease linter --- numpy/_core/tests/test_api.py | 3 +- numpy/_core/tests/test_arrayprint.py | 3 +- numpy/_core/tests/test_datetime.py | 21 +-- numpy/_core/tests/test_einsum.py | 6 +- numpy/_core/tests/test_indexing.py | 5 +- numpy/_core/tests/test_mem_policy.py | 4 +- numpy/_core/tests/test_memmap.py | 7 +- numpy/_core/tests/test_multiarray.py | 51 +++---- numpy/_core/tests/test_nditer.py | 13 +- numpy/_core/tests/test_regression.py | 9 +- numpy/_core/tests/test_scalar_ctors.py | 6 +- numpy/_core/tests/test_scalarmath.py | 28 ++-- numpy/_core/tests/test_ufunc.py | 9 +- numpy/_core/tests/test_umath.py | 29 ++-- numpy/distutils/tests/test_exec_command.py | 12 +- numpy/lib/_nanfunctions_impl.py | 1 - numpy/lib/_scimath_impl.py | 6 +- numpy/lib/tests/test_format.py | 5 +- numpy/lib/tests/test_function_base.py | 16 +-- numpy/lib/tests/test_histograms.py | 16 +-- numpy/lib/tests/test_io.py | 23 ++-- numpy/lib/tests/test_nanfunctions.py | 59 ++++---- numpy/lib/tests/test_stride_tricks.py | 5 +- numpy/linalg/tests/test_deprecations.py | 11 +- numpy/linalg/tests/test_linalg.py | 12 +- numpy/ma/tests/test_core.py | 137 +++++++++---------- numpy/ma/tests/test_deprecations.py | 7 +- numpy/ma/tests/test_extras.py | 68 ++++----- numpy/ma/tests/test_regression.py | 14 +- numpy/polynomial/tests/test_polynomial.py | 5 +- numpy/random/tests/test_generator_mt19937.py | 12 +- 
numpy/random/tests/test_random.py | 17 +-- numpy/random/tests/test_randomstate.py | 45 +++--- numpy/tests/test_reloading.py | 6 +- numpy/tests/test_warnings.py | 9 +- 35 files changed, 318 insertions(+), 362 deletions(-) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index da4a8f423bc5..c2e5bf8909f9 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -12,7 +12,6 @@ assert_array_equal, assert_equal, assert_raises, - assert_warns, ) @@ -315,7 +314,7 @@ def test_object_array_astype_to_void(): def test_array_astype_warning(t): # test ComplexWarning when casting from complex to float or int a = np.array(10, dtype=np.complex128) - assert_warns(np.exceptions.ComplexWarning, a.astype, t) + pytest.warns(np.exceptions.ComplexWarning, a.astype, t) @pytest.mark.parametrize(["dtype", "out_dtype"], [(np.bytes_, np.bool), diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 1fd4ac2fddb7..a054f408e954 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -15,7 +15,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) from numpy.testing._private.utils import run_threaded @@ -736,7 +735,7 @@ def test_0d_arrays(self): assert_equal(str(x), "1") # check `style` arg raises - assert_warns(DeprecationWarning, np.array2string, + pytest.warns(DeprecationWarning, np.array2string, np.array(1.), style=repr) # but not in legacy mode np.array2string(np.array(1.), style=repr, legacy='1.13') diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index c5d2ca459e6a..a10ca15bc373 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,6 @@ import datetime import pickle +import warnings from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -13,8 +14,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) try: @@ 
-1282,8 +1281,9 @@ def test_datetime_multiply(self): assert_raises(TypeError, np.multiply, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in multiply") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in multiply", RuntimeWarning) nat = np.timedelta64('NaT') def check(a, b, res): @@ -1344,7 +1344,7 @@ def test_timedelta_floor_divide(self, op1, op2, exp): np.timedelta64(-1)), ]) def test_timedelta_floor_div_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = op1 // op2 assert_equal(actual, 0) assert_equal(actual.dtype, np.int64) @@ -1428,9 +1428,9 @@ def test_timedelta_divmod_typeerror(self, op1, op2): np.timedelta64(-1)), ]) def test_timedelta_divmod_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): expected = (op1 // op2, op1 % op2) - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = divmod(op1, op2) assert_equal(actual, expected) @@ -1482,8 +1482,9 @@ def test_datetime_divide(self): assert_raises(TypeError, np.divide, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, r".*encountered in divide") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', r".*encountered in divide", RuntimeWarning) nat = np.timedelta64('NaT') for tp in (int, float): assert_equal(np.timedelta64(1) / tp(0), nat) @@ -2045,7 +2046,7 @@ def test_timedelta_modulus_error(self, val1, val2): @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_timedelta_modulus_div_by_zero(self): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') assert_equal(actual, np.timedelta64('NaT')) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 0bd180b5e41f..84d4af1707b6 100644 --- 
a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1,4 +1,5 @@ import itertools +import warnings import pytest @@ -11,7 +12,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) # Setup for optimize einsum @@ -455,8 +455,8 @@ def check_einsum_sums(self, dtype, do_opt=False): np.outer(a, b)) # Suppress the complex warnings for the 'as f8' tests - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) # matvec(a,b) / a.dot(b) where a is matrix, b is vector for n in range(1, 17): diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 81ba85ea4648..2a8a669d0787 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -16,7 +16,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -784,12 +783,12 @@ def test_boolean_index_cast_assign(self): assert_equal(zero_array[0, 1], 1) # Fancy indexing works, although we get a cast warning. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) assert_equal(zero_array[0, 1], 2) # No complex part # Cast complex to float, throwing away the imaginary portion. 
- assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index b9f971e73249..70befb8bd324 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -9,7 +9,7 @@ import numpy as np from numpy._core.multiarray import get_handler_name -from numpy.testing import IS_EDITABLE, IS_WASM, assert_warns, extbuild +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture @@ -432,7 +432,7 @@ def test_switch_owner(get_module, policy): # The policy should be NULL, so we have to assume we can call # "free". A warning is given if the policy == "1" if policy: - with assert_warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning) as w: del a gc.collect() else: diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index cbd825205844..49931e1680e8 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -1,6 +1,7 @@ import mmap import os import sys +import warnings from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryFile @@ -26,7 +27,6 @@ assert_array_equal, assert_equal, break_cycles, - suppress_warnings, ) @@ -167,8 +167,9 @@ def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data - with suppress_warnings() as sup: - sup.filter(FutureWarning, "np.average currently does not preserve") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "np.average currently does not preserve", FutureWarning) for unary_op in [sum, average, prod]: result = unary_op(fp) assert_(isscalar(result)) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 4ee021d18f6f..930c736c6076 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ 
-48,11 +48,9 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, break_cycles, check_support_sve, runstring, - suppress_warnings, temppath, ) from numpy.testing._private.utils import _no_tracing, requires_memory @@ -3805,11 +3803,11 @@ def test__complex__(self): ap = complex(a) assert_equal(ap, a, msg) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): bp = complex(b) assert_equal(bp, b, msg) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): cp = complex(c) assert_equal(cp, c, msg) @@ -3833,7 +3831,7 @@ def test__complex__should_not_work(self): assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_raises(TypeError, complex, e) class TestCequenceMethods: @@ -4932,9 +4930,11 @@ class TestArgmax: @pytest.mark.parametrize('data', nan_arr) def test_combinations(self, data): arr, pos = data - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) val = np.max(arr) assert_equal(np.argmax(arr), pos, err_msg=f"{arr!r}") @@ -5074,9 +5074,11 @@ class TestArgmin: @pytest.mark.parametrize('data', nan_arr) def test_combinations(self, data): arr, pos = data - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) min_val = np.min(arr) assert_equal(np.argmin(arr), pos, err_msg=f"{arr!r}") @@ -7268,8 +7270,8 @@ def test_out_arg(self): out = np.zeros((5, 2), dtype=np.complex128) c = self.matmul(a, b, out=out) assert_(c is out) - with suppress_warnings() as sup: - sup.filter(ComplexWarning, '') + with warnings.catch_warnings(): + 
warnings.simplefilter('ignore', ComplexWarning) c = c.astype(tgt.dtype) assert_array_equal(c, tgt) @@ -8816,8 +8818,9 @@ def test_multiarray_writable_attributes_deletion(self): # ticket #2046, should not seqfault, raise AttributeError a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "Assigning the 'data' attribute") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "Assigning the 'data' attribute", DeprecationWarning) for s in attr: assert_raises(AttributeError, delattr, a, s) @@ -9071,9 +9074,9 @@ def test_to_int_scalar(self): int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_equal(int_func(np.array([1])), 1) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_equal(int_func(np.array([[42]])), 42) assert_raises(TypeError, int_func, np.array([1, 2])) @@ -9087,7 +9090,7 @@ def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_raises(NotImplementedError, int_func, np.array([NotConvertible()])) @@ -9677,12 +9680,10 @@ def test_view_assign(self): @pytest.mark.leaks_references( reason="increments self in dealloc; ignore since deprecated path.") def test_dealloc_warning(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - arr = np.arange(9).reshape(3, 3) - v = arr.T + arr = np.arange(9).reshape(3, 3) + v = arr.T + with pytest.warns(RuntimeWarning): _multiarray_tests.npy_abuse_writebackifcopy(v) - assert len(sup.log) == 1 def test_view_discard_refcount(self): from numpy._core._multiarray_tests import ( @@ -10202,8 +10203,8 @@ def test_strided_loop_alignments(self): xf128 = _aligned_zeros(3, 
np.longdouble, align=align) # test casting, both to and from misaligned - with suppress_warnings() as sup: - sup.filter(ComplexWarning, "Casting complex values") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "Casting complex values", ComplexWarning) xc64.astype('f8') xf64.astype(np.complex64) test = xc64 + xf64 diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index f71130f16331..f0e10333808c 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,6 +1,7 @@ import subprocess import sys import textwrap +import warnings import pytest @@ -15,7 +16,6 @@ assert_array_equal, assert_equal, assert_raises, - suppress_warnings, ) from numpy.testing._private.utils import requires_memory @@ -1899,8 +1899,8 @@ def test_iter_buffered_cast_byteswapped(): assert_equal(a, 2 * np.arange(10, dtype='f4')) - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) a = np.arange(10, dtype='f8') a = a.view(a.dtype.newbyteorder()).byteswap() @@ -3310,13 +3310,10 @@ def test_warn_noclose(): a = np.arange(6, dtype='f4') au = a.byteswap() au = au.view(au.dtype.newbyteorder()) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): it = np.nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) + casting='equiv', op_dtypes=[np.dtype('f4')]) del it - assert len(sup.log) == 1 - @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index d9a3a529c9ea..2aeb29a1320d 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -3,6 +3,7 @@ import pickle import sys import tempfile +import warnings from io import BytesIO from itertools import chain 
from os import path @@ -27,8 +28,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) from numpy.testing._private.utils import _no_tracing, requires_memory @@ -1619,9 +1618,9 @@ def test_fromfile_tofile_seeks(self): def test_complex_scalar_warning(self): for tp in [np.csingle, np.cdouble, np.clongdouble]: x = tp(1 + 2j) - assert_warns(ComplexWarning, float, x) - with suppress_warnings() as sup: - sup.filter(ComplexWarning) + pytest.warns(ComplexWarning, float, x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) assert_equal(float(x), float(x.real)) def test_complex_scalar_complex_cast(self): diff --git a/numpy/_core/tests/test_scalar_ctors.py b/numpy/_core/tests/test_scalar_ctors.py index ea78cd9d9f51..05ede01b5973 100644 --- a/numpy/_core/tests/test_scalar_ctors.py +++ b/numpy/_core/tests/test_scalar_ctors.py @@ -4,7 +4,7 @@ import pytest import numpy as np -from numpy.testing import assert_almost_equal, assert_equal, assert_warns +from numpy.testing import assert_almost_equal, assert_equal class TestFromString: @@ -25,7 +25,7 @@ def test_floating_overflow(self): assert_equal(fsingle, np.inf) fdouble = np.double('1e10000') assert_equal(fdouble, np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '1e10000') assert_equal(flongdouble, np.inf) fhalf = np.half('-1e10000') @@ -34,7 +34,7 @@ def test_floating_overflow(self): assert_equal(fsingle, -np.inf) fdouble = np.double('-1e10000') assert_equal(fdouble, -np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '-1e10000') assert_equal(flongdouble, -np.inf) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 746b410f79d2..8c24b4cfdc88 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ 
-23,7 +23,6 @@ assert_equal, assert_raises, check_support_sve, - suppress_warnings, ) types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, @@ -369,12 +368,7 @@ def test_float_modulus_corner_cases(self): assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") - sup.filter(RuntimeWarning, "invalid value encountered in divmod") + with warnings.catch_warnings(), np.errstate(all='ignore'): for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) @@ -522,21 +516,17 @@ def test_int_from_infinite_longdouble(self): # gh-627 x = np.longdouble(np.inf) assert_raises(OverflowError, int, x) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, int, x) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") def test_int_from_infinite_longdouble___int__(self): x = np.longdouble(np.inf) assert_raises(OverflowError, x.__int__) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, x.__int__) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") @@ -731,8 +721,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] if dt in 
np.typecodes['UnsignedInteger']: @@ -749,8 +739,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] assert_equal(operator.sub(a, a), 0) @@ -771,8 +761,8 @@ def _test_abs_func(self, absfunc, test_dtype): x = test_dtype(np.finfo(test_dtype).max) assert_equal(absfunc(x), x.real) - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) x = test_dtype(np.finfo(test_dtype).tiny) assert_equal(absfunc(x), x.real) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 836be1245399..b86e22917734 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -26,7 +26,6 @@ assert_equal, assert_no_warnings, assert_raises, - suppress_warnings, ) from numpy.testing._private.utils import requires_memory @@ -686,8 +685,8 @@ def test_true_divide(self): tgt = float(x) / float(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -706,8 +705,8 @@ def test_true_divide(self): tgt = complex(x) / complex(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index b5145e9d642b..d8ed56c31b93 
100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -30,7 +30,6 @@ assert_no_warnings, assert_raises, assert_raises_regex, - suppress_warnings, ) from numpy.testing._private.utils import _glibc_older_than @@ -703,8 +702,8 @@ def test_floor_division_corner_cases(self, dtype): fone = np.array(1.0, dtype=dtype) fzer = np.array(0.0, dtype=dtype) finf = np.array(np.inf, dtype=dtype) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning) div = np.floor_divide(fnan, fone) assert np.isnan(div), f"div: {div}" div = np.floor_divide(fone, fnan) @@ -859,9 +858,9 @@ def test_float_divmod_corner_cases(self): fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in divmod") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in divmod", RuntimeWarning) + warnings.filterwarnings('ignore', "divide by zero encountered in divmod", RuntimeWarning) div, rem = np.divmod(fone, fzer) assert np.isinf(div), f'dt: {dt}, div: {rem}' assert np.isnan(rem), f'dt: {dt}, rem: {rem}' @@ -898,9 +897,9 @@ def test_float_remainder_corner_cases(self): assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "invalid value encountered in fmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in remainder", RuntimeWarning) + warnings.filterwarnings('ignore', "invalid value encountered in fmod", RuntimeWarning) for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) 
fzer = np.array(0.0, dtype=dt) @@ -2957,9 +2956,11 @@ def test_minmax_blocked(self): inp[:] = np.arange(inp.size, dtype=dt) inp[i] = np.nan emsg = lambda: f'{inp!r}\n{msg}' - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) assert_(np.isnan(inp.max()), msg=emsg) assert_(np.isnan(inp.min()), msg=emsg) @@ -4603,8 +4604,8 @@ def test_nextafter_0(): for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): # The value of tiny for double double is NaN, so we need to pass the # assert - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(t).tiny): tiny = np.finfo(t).tiny assert_( diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py index d1a20056a5a2..749523528e63 100644 --- a/numpy/distutils/tests/test_exec_command.py +++ b/numpy/distutils/tests/test_exec_command.py @@ -5,7 +5,7 @@ from numpy.distutils import exec_command from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, assert_warns, IS_WASM +from numpy.testing import tempdir, assert_, IS_WASM # In python 3 stdout, stderr are text (unicode compliant) devices, so to @@ -68,7 +68,7 @@ def test_exec_command_stdout(): # Test posix version: with redirect_stdout(StringIO()): with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") if os.name == 'posix': @@ -76,14 +76,14 @@ def test_exec_command_stdout(): with emulate_nonposix(): with redirect_stdout(StringIO()): with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") def 
test_exec_command_stderr(): # Test posix version: with redirect_stdout(TemporaryFile(mode='w+')): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") if os.name == 'posix': @@ -91,7 +91,7 @@ def test_exec_command_stderr(): with emulate_nonposix(): with redirect_stdout(TemporaryFile()): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") @@ -206,7 +206,7 @@ def check_execute_in(self, **kws): def test_basic(self): with redirect_stdout(StringIO()): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): if os.name == "posix": self.check_posix(use_tee=0) self.check_posix(use_tee=1) diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 4a01490301c8..aec60d484ba4 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -717,7 +717,6 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, inf >>> np.nansum([1, np.nan, -np.inf]) -inf - >>> from numpy.testing import suppress_warnings >>> with np.errstate(invalid="ignore"): ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present np.float64(nan) diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index 8136a7d54515..b33f42b3d10d 100644 --- a/numpy/lib/_scimath_impl.py +++ b/numpy/lib/_scimath_impl.py @@ -628,9 +628,9 @@ def arctanh(x): >>> np.emath.arctanh(0.5) 0.5493061443340549 - >>> from numpy.testing import suppress_warnings - >>> with suppress_warnings() as sup: - ... sup.filter(RuntimeWarning) + >>> import warnings + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', RuntimeWarning) ... 
np.emath.arctanh(np.eye(2)) array([[inf, 0.], [ 0., inf]]) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index cf11dd25dbda..a6de6238d269 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -291,7 +291,6 @@ assert_array_equal, assert_raises, assert_raises_regex, - assert_warns, ) from numpy.testing._private.utils import requires_memory @@ -1008,7 +1007,7 @@ def test_unicode_field_names(tmpdir): # notifies the user that 3.0 is selected with open(fname, 'wb') as f: - with assert_warns(UserWarning): + with pytest.warns(UserWarning): format.write_array(f, arr, version=None) def test_header_growth_axis(): @@ -1041,7 +1040,7 @@ def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() - with assert_warns(UserWarning): + with pytest.warns(UserWarning): np.save(buf, arr) buf.seek(0) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index eccf4bcfb019..37f8bbadc31a 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -61,8 +61,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) np_floats = [np.half, np.single, np.double, np.longdouble] @@ -2473,10 +2471,10 @@ def test_simple(self): def test_ddof(self): # ddof raises DeprecationWarning - with suppress_warnings() as sup: + with warnings.catch_warnings(): warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) - sup.filter(DeprecationWarning) + pytest.warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + warnings.simplefilter('ignore', DeprecationWarning) # ddof has no or negligible effect on the function assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) @@ -2485,11 +2483,11 @@ def test_ddof(self): def test_bias(self): # bias raises DeprecationWarning - with suppress_warnings() as sup: + with 
warnings.catch_warnings(): warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) - assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) - sup.filter(DeprecationWarning) + pytest.warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + pytest.warns(DeprecationWarning, corrcoef, self.A, bias=0) + warnings.simplefilter('ignore', DeprecationWarning) # bias has no or negligible effect on the function assert_almost_equal(corrcoef(self.A, bias=1), self.res1) diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 4ba953f462fc..fec8828d242e 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -1,3 +1,5 @@ +import warnings + import pytest import numpy as np @@ -12,7 +14,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) @@ -138,11 +139,9 @@ def test_bool_conversion(self): # Should raise an warning on booleans # Ensure that the histograms are equivalent, need to suppress # the warnings to get the actual outputs - with suppress_warnings() as sup: - rec = sup.record(RuntimeWarning, 'Converting input from .*') + with pytest.warns(RuntimeWarning, match='Converting input from .*'): hist, edges = np.histogram([True, True, False]) # A warning should be issued - assert_equal(len(rec), 1) assert_array_equal(hist, int_hist) assert_array_equal(edges, int_edges) @@ -284,9 +283,8 @@ def test_some_nan_values(self): all_nan = np.array([np.nan, np.nan]) # the internal comparisons with NaN give warnings - sup = suppress_warnings() - sup.filter(RuntimeWarning) - with sup: + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) # can't infer range with nan assert_raises(ValueError, histogram, one_nan, bins='auto') assert_raises(ValueError, histogram, all_nan, bins='auto') @@ -618,9 +616,9 @@ def test_integer(self, bins): """ Test that bin width for integer data is at least 1. 
""" - with suppress_warnings() as sup: + with warnings.catch_warnings(): if bins == 'stone': - sup.filter(RuntimeWarning) + warnings.simplefilter('ignore', RuntimeWarning) assert_equal( np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), np.arange(9)) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 05ef0be92211..a1698f240bff 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -36,9 +36,7 @@ assert_no_warnings, assert_raises, assert_raises_regex, - assert_warns, break_cycles, - suppress_warnings, tempdir, temppath, ) @@ -336,8 +334,9 @@ def test_closing_fid(self): # goes to zero. Python running in debug mode raises a # ResourceWarning when file closing is left to the garbage # collector, so we catch the warnings. - with suppress_warnings() as sup: - sup.filter(ResourceWarning) # TODO: specify exact message + with warnings.catch_warnings(): + # TODO: specify exact message + warnings.simplefilter('ignore', ResourceWarning) for i in range(1, 1025): try: np.load(tmp)["data"] @@ -1446,8 +1445,8 @@ def test_skip_footer(self): assert_equal(test, ctrl) def test_skip_footer_with_invalid(self): - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' # Footer too small to get rid of all invalid values assert_raises(ValueError, np.genfromtxt, @@ -1845,8 +1844,8 @@ def test_usecols_with_named_columns(self): def test_empty_file(self): # Test that an empty file raises the proper warning. 
- with suppress_warnings() as sup: - sup.filter(message="genfromtxt: Empty input file:") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message="genfromtxt: Empty input file:") data = TextIO() test = np.genfromtxt(data) assert_equal(test, np.array([])) @@ -1991,7 +1990,7 @@ def test_invalid_raise(self): def f(): return np.genfromtxt(mdata, invalid_raise=False, **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) # @@ -2012,7 +2011,7 @@ def test_invalid_raise_with_usecols(self): def f(): return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) # @@ -2424,8 +2423,8 @@ def test_max_rows(self): assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) # Test with invalid not raise - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 89a6d1f95fed..447b84db3edc 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -15,7 +15,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) # Test data @@ -281,8 +280,9 @@ def test_mutation(self): def test_result_values(self): for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): for row in _ndat: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in", 
RuntimeWarning) ind = f(row) val = row[ind] # comparing with NaN is tricky as the result @@ -491,10 +491,10 @@ def test_dtype_from_dtype(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type assert_(res is tgt) @@ -508,10 +508,10 @@ def test_dtype_from_char(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=c, axis=1).dtype.type res = nf(mat, dtype=c, axis=1).dtype.type assert_(res is tgt) @@ -723,22 +723,22 @@ def test_ddof(self): res = nf(_ndat, axis=1, ddof=ddof) assert_almost_equal(res, tgt) - def test_ddof_too_big(self): + def test_ddof_too_big(self, recwarn): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - sup.filter(ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) - if any(tgt): - assert_(len(sup.log) == 1) - else: - assert_(len(sup.log) == 0) + if any(tgt): + assert_(len(recwarn) == 1) + recwarn.pop(RuntimeWarning) + else: + assert_(len(recwarn) == 0) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) @@ 
-860,8 +860,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = np.nanmedian(d, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1), keepdims=True) @@ -946,17 +946,15 @@ def test_result_values(self): @pytest.mark.parametrize("dtype", _TYPE_CODES) def test_allnans(self, dtype, axis): mat = np.full((3, 3), np.nan).astype(dtype) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - + with pytest.warns(RuntimeWarning) as r: output = np.nanmedian(mat, axis=axis) assert output.dtype == mat.dtype assert np.isnan(output).all() if axis is None: - assert_(len(sup.log) == 1) + assert_(len(r) == 1) else: - assert_(len(sup.log) == 3) + assert_(len(r) == 3) # Check scalar scalar = np.array(np.nan).astype(dtype)[()] @@ -965,9 +963,9 @@ def test_allnans(self, dtype, axis): assert np.isnan(output_scalar) if axis is None: - assert_(len(sup.log) == 2) + assert_(len(r) == 2) else: - assert_(len(sup.log) == 4) + assert_(len(r) == 4) def test_empty(self): mat = np.zeros((0, 3)) @@ -995,8 +993,8 @@ def test_extended_axis_invalid(self): assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) def test_float_special(self): - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for inf in [np.inf, -np.inf]: a = np.array([[inf, np.nan], [np.nan, np.nan]]) assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) @@ -1063,8 +1061,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = 
np.nanpercentile(d, 90, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) @@ -1233,8 +1231,9 @@ def test_multiple_percentiles(self): large_mat[:, :, 3:] *= 2 for axis in [None, 0, 1]: for keepdim in [False, True]: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "All-NaN slice encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "All-NaN slice encountered", RuntimeWarning) val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) nan_val = np.nanpercentile(nan_mat, perc, axis=axis, keepdims=keepdim) diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index fe40c953a147..fb654b4cfb85 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -16,7 +16,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -593,9 +592,9 @@ def test_writeable(): for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version if array_is_broadcast: - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): assert_equal(result.flags.writeable, True) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): result[:] = 0 # Warning not emitted, writing to the array resets it assert_equal(result.flags.writeable, True) diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py index cd4c10832e7e..7fb5008f1ff8 100644 --- a/numpy/linalg/tests/test_deprecations.py +++ b/numpy/linalg/tests/test_deprecations.py @@ -1,8 +1,9 @@ """Test deprecation and future warnings. 
""" +import pytest + import numpy as np -from numpy.testing import assert_warns def test_qr_mode_full_future_warning(): @@ -14,7 +15,7 @@ def test_qr_mode_full_future_warning(): """ a = np.eye(2) - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='full') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='f') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index cbf7dd63be5e..8ad1c3ed6d16 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -8,6 +8,7 @@ import textwrap import threading import traceback +import warnings import pytest @@ -42,7 +43,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) try: @@ -1318,8 +1318,9 @@ def test_vector_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 0.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 0.0) @@ -1481,8 +1482,9 @@ def test_matrix_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 2.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 1.0) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 
091ba6c99fff..2704857308a3 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -139,32 +139,24 @@ assert_not_equal, fail_if_equal, ) -from numpy.testing import ( - IS_WASM, - assert_raises, - assert_warns, - suppress_warnings, - temppath, -) +from numpy.testing import IS_WASM, assert_raises, temppath from numpy.testing._private.utils import requires_memory pi = np.pi -suppress_copy_mask_on_assignment = suppress_warnings() -suppress_copy_mask_on_assignment.filter( - numpy.ma.core.MaskedArrayFutureWarning, - "setting an item on a masked array which has a shared mask will not copy") - - # For parametrized numeric testing num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] num_ids = [dt_.char for dt_ in num_dts] +WARNING_MESSAGE = ("setting an item on a masked array which has a shared " + "mask will not copy") +WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning" class TestMaskedArray: # Base test class for MaskedArrays. + # message for warning filters def setup_method(self): # Base data definition. x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) @@ -492,7 +484,7 @@ def test_setitem_no_warning(self): x[...] = value x[[0, 1, 2]] = value - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_copy(self): # Tests of some subtle points of copying and sizing. n = [0, 0, 1, 0, 0] @@ -511,9 +503,9 @@ def test_copy(self): y1a = array(y1) # Default for masked array is not to copy; see gh-10318. 
assert_(y1a._data.__array_interface__ == - y1._data.__array_interface__) + y1._data.__array_interface__) assert_(y1a._mask.__array_interface__ == - y1._mask.__array_interface__) + y1._mask.__array_interface__) y2 = array(x1, mask=m3) assert_(y2._data.__array_interface__ == x1.__array_interface__) @@ -612,7 +604,7 @@ def test_format(self): # assert_equal(format(masked, " <5"), "-- ") # Expect a FutureWarning for using format_spec with MaskedElement - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): with_format_string = format(masked, " >5") assert_equal(with_format_string, "--") @@ -793,8 +785,9 @@ def test_topython(self): assert_equal(1.0, float(array([[1]]))) assert_raises(TypeError, float, array([1, 1])) - with suppress_warnings() as sup: - sup.filter(UserWarning, 'Warning: converting a masked element') + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', 'Warning: converting a masked element', UserWarning) assert_(np.isnan(float(array([1], mask=[1])))) a = array([1, 2, 3], mask=[1, 0, 0]) @@ -847,14 +840,17 @@ def test_oddfeatures_2(self): assert_(z[1] is not masked) assert_(z[2] is masked) - @suppress_copy_mask_on_assignment def test_oddfeatures_3(self): - # Tests some generic features - atest = array([10], mask=True) - btest = array([20]) - idx = atest.mask - atest[idx] = btest[idx] - assert_equal(atest, [20]) + msg = "setting an item on a masked array which has a shared mask will not copy" + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', msg, numpy.ma.core.MaskedArrayFutureWarning) + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) def test_filled_with_object_dtype(self): a = np.ma.masked_all(1, dtype='O') @@ -2004,6 +2000,7 @@ def test_comparisons_strings(self, op, fill): ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) assert_equal(op(ma1, ma2)._data, op(ma1._data, 
ma2._data)) + @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") def test_eq_with_None(self): # Really, comparisons with None should not be done, but check them # anyway. Note that pep8 will flag these tests. @@ -2011,23 +2008,21 @@ def test_eq_with_None(self): # test will fail (and have to be changed accordingly). # With partial mask - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Comparison to `None`") - a = array([None, 1], mask=[0, 1]) - assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 - assert_equal(a.data == None, [True, False]) # noqa: E711 - assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 - # With nomask - a = array([None, 1], mask=False) - assert_equal(a == None, [True, False]) # noqa: E711 - assert_equal(a != None, [False, True]) # noqa: E711 - # With complete mask - a = array([None, 2], mask=True) - assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 - assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 - # Fully masked, even comparison to None should return "masked" - a = masked - assert_equal(a == None, masked) # noqa: E711 + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) # noqa: E711 def test_eq_with_scalar(self): a = array(1) @@ -2383,16 +2378,15 @@ def test_fillvalue(self): 
assert_equal(x.fill_value, 999.) assert_equal(x._fill_value, np.array(999.)) + @pytest.mark.filterwarnings("ignore:.*Numpy has detected.*:FutureWarning") def test_subarray_fillvalue(self): # gh-10483 test multi-field index fill value fields = array([(1, 1, 1)], dtype=[('i', int), ('s', '|S8'), ('f', float)]) - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Numpy has detected") - subfields = fields[['i', 'f']] - assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) - # test comparison does not raise: - subfields[1:] == subfields[:-1] + subfields = fields[['i', 'f']] + assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) + # test comparison does not raise: + subfields[1:] == subfields[:-1] def test_fillvalue_exotic_dtype(self): # Tests yet more exotic flexible dtypes @@ -3138,14 +3132,14 @@ def test_inplace_floor_division_array_type(self): def test_inplace_division_scalar_type(self): # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) - + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in self.othertypes: (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked + nwarns = 0 # May get a DeprecationWarning or a TypeError. 
# @@ -3159,28 +3153,29 @@ def test_inplace_division_scalar_type(self): try: x /= t(2) assert_equal(x, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 try: xm /= t(2) assert_equal(xm, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, f'Failed on type={t}.') + assert_equal(nwarns, 2, f'Failed on type={t}.') else: - assert_equal(len(sup.log), 0, f'Failed on type={t}.') + assert_equal(nwarns, 0, f'Failed on type={t}.') def test_inplace_division_array_type(self): # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in self.othertypes: (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked + nwarns = 0 # May get a DeprecationWarning or a TypeError. 
# @@ -3194,8 +3189,8 @@ def test_inplace_division_array_type(self): try: x /= a assert_equal(x, y / a) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 try: xm /= a assert_equal(xm, y / a) @@ -3203,13 +3198,13 @@ def test_inplace_division_array_type(self): xm.mask, mask_or(mask_or(m, a.mask), (a == t(0))) ) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, f'Failed on type={t}.') + assert_equal(nwarns, 2, f'Failed on type={t}.') else: - assert_equal(len(sup.log), 0, f'Failed on type={t}.') + assert_equal(nwarns, 0, f'Failed on type={t}.') def test_inplace_pow_type(self): # Test keeping data w/ (inplace) power @@ -3508,7 +3503,7 @@ def test_ones(self): b = a.view() assert_(np.may_share_memory(a.mask, b.mask)) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_put(self): # Tests put. 
d = arange(5) @@ -4226,7 +4221,7 @@ def test_varstd(self): assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_varstd_specialcases(self): # Test a special case for var nout = np.array(-1, dtype=float) @@ -5582,7 +5577,7 @@ def test_coercion_int(self): def test_coercion_float(self): a_f = np.zeros((), float) - assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + pytest.warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) assert_(np.isnan(a_f[()])) @pytest.mark.xfail(reason="See gh-9750") diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index 8cc8b9c72bb9..a2c98d0c229d 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -9,7 +9,6 @@ import numpy as np from numpy.ma.core import MaskedArrayFutureWarning from numpy.ma.testutils import assert_equal -from numpy.testing import assert_warns class TestArgsort: @@ -23,7 +22,7 @@ def _test_base(self, argsort, cls): # argsort has a bad default for >1d arrays arr_2d = np.array([[1, 2], [3, 4]]).view(cls) - result = assert_warns( + result = pytest.warns( np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) assert_equal(result, argsort(arr_2d, axis=None)) @@ -53,10 +52,10 @@ def test_axis_default(self): ma_max = np.ma.maximum.reduce # check that the default axis is still None, but warns on 2d arrays - result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_max, data2d) assert_equal(result, ma_max(data2d, axis=None)) - result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_min, data2d) assert_equal(result, ma_min(data2d, axis=None)) # no warnings on 1d, as both new and old defaults are equivalent diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 
3d10e839cbc9..34a032087536 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -68,7 +68,6 @@ assert_array_equal, assert_equal, ) -from numpy.testing import assert_warns, suppress_warnings class TestGeneric: @@ -746,7 +745,7 @@ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): res = func(x, axis=axis) assert_equal(res, mask_rowcols(x, rowcols_axis)) @@ -1287,19 +1286,14 @@ def test_special(self): def test_empty(self): # empty arrays a = np.ma.masked_array(np.array([], dtype=float)) - with suppress_warnings() as w: - w.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # multiple dimensions a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) # no axis - with suppress_warnings() as w: - w.record(RuntimeWarning) - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # axis 0 and 1 b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) @@ -1308,10 +1302,8 @@ def test_empty(self): # axis 2 b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_equal(np.ma.median(a, axis=2), b) - assert_(w[0].category is RuntimeWarning) def test_object(self): o = np.ma.masked_array(np.arange(7.)) @@ -1418,10 +1410,13 @@ def test_ddof(self): x, y = self.data, self.data2 expected = np.corrcoef(x) expected2 = np.corrcoef(x, y) - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) - sup.filter(DeprecationWarning, "bias 
and ddof have no effect") + with pytest.warns(DeprecationWarning): + corrcoef(x, ddof=-1) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) + # ddof has no or negligible effect on the function assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) assert_almost_equal(corrcoef(x, ddof=-1), expected) @@ -1433,12 +1428,16 @@ def test_bias(self): x, y = self.data, self.data2 expected = np.corrcoef(x) # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, y, True, False) - assert_warns(DeprecationWarning, corrcoef, x, y, True, True) - assert_warns(DeprecationWarning, corrcoef, x, bias=False) - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with pytest.warns(DeprecationWarning): + corrcoef(x, y, True, False) + with pytest.warns(DeprecationWarning): + corrcoef(x, y, True, True) + with pytest.warns(DeprecationWarning): + corrcoef(x, y, bias=False) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # bias has no or negligible effect on the function assert_almost_equal(corrcoef(x, bias=1), expected) @@ -1448,8 +1447,9 @@ def test_1d_without_missing(self): assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) @@ -1459,8 +1459,9 @@ def test_2d_without_missing(self): assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() 
as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) @@ -1473,8 +1474,9 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx), corrcoef(x)) assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) try: @@ -1486,8 +1488,9 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), corrcoef(x, x[::-1], rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # ddof and bias have no or negligible effect on the function assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1], bias=1)) @@ -1503,8 +1506,9 @@ def test_2d_with_missing(self): test = corrcoef(x) control = np.corrcoef(x) assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # ddof and bias have no or negligible effect on the function assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], control[:-1, :-1]) diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py index 
025387ba454c..2a08234cba61 100644 --- a/numpy/ma/tests/test_regression.py +++ b/numpy/ma/tests/test_regression.py @@ -1,10 +1,7 @@ +import warnings + import numpy as np -from numpy.testing import ( - assert_, - assert_allclose, - assert_array_equal, - suppress_warnings, -) +from numpy.testing import assert_, assert_allclose, assert_array_equal class TestRegression: @@ -67,8 +64,9 @@ def test_ddof_corrcoef(self): x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) y = np.array([2, 2.5, 3.1, 3, 5]) # this test can be removed after deprecation. - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) r0 = np.ma.corrcoef(x, y, ddof=0) r1 = np.ma.corrcoef(x, y, ddof=1) # ddof should not have an effect (it gets cancelled out) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 8bfa3c184cf7..5c8e85c7f860 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -6,6 +6,8 @@ from fractions import Fraction from functools import reduce +import pytest + import numpy as np import numpy.polynomial.polynomial as poly import numpy.polynomial.polyutils as pu @@ -16,7 +18,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -656,7 +657,7 @@ def test_fit_degenerate_domain(self): assert_equal(p.coef, [2.]) p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) assert_almost_equal(p.coef, [2.05]) - with assert_warns(pu.RankWarning): + with pytest.warns(pu.RankWarning): p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) def test_result_type(self): diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index d09cbba4ec39..50c232d4a8e7 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1,6 +1,7 @@ import 
hashlib import os.path import sys +import warnings import pytest @@ -17,8 +18,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) random = Generator(MT19937()) @@ -1463,8 +1462,8 @@ def test_multivariate_normal(self, method): # Check that non positive-semidefinite covariance warns with # RuntimeWarning cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov, method='eigh') assert_raises(LinAlgError, random.multivariate_normal, mean, cov, method='cholesky') @@ -1491,10 +1490,9 @@ def test_multivariate_normal(self, method): method='cholesky') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter("error") random.multivariate_normal(mean, cov, method=method) - w = sup.record(RuntimeWarning) - assert len(w) == 0 mu = np.zeros(2) cov = np.eye(2) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index d5981906f6ef..4eb455eb77be 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -13,8 +13,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) @@ -331,10 +329,8 @@ def test_randint(self): def test_random_integers(self): np.random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = np.random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) @@ -346,11 +342,9 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. 
- with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = np.random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) @@ -795,7 +789,7 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' assert_no_warnings(np.random.multivariate_normal, mean, cov, @@ -806,10 +800,9 @@ def test_multivariate_normal(self): check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter('error') np.random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 def test_negative_binomial(self): np.random.seed(self.seed) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index cf4488543c12..c56d3c0f186c 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -16,8 +16,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) INT_FUNCS = {'binomial': (100.0, 0.6), @@ -242,12 +240,10 @@ def test_negative_binomial(self): def test_get_state_warning(self): rs = random.RandomState(PCG64()) - with suppress_warnings() as sup: - w = sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): state = rs.get_state() - assert_(len(w) == 1) - assert isinstance(state, dict) - assert state['bit_generator'] == 'PCG64' + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' def test_invalid_legacy_state_setting(self): state = self.random_state.get_state() @@ -487,20 +483,16 @@ def test_randint(self): def test_random_integers(self): 
random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(198, size=(3, 2)) - assert_(len(w) == 1) assert_array_equal(actual, desired + 100) def test_tomaxint(self): @@ -529,20 +521,16 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): typer = np.dtype('l').type actual = random.random_integers(typer(np.iinfo('l').max), typer(np.iinfo('l').max)) - assert_(len(w) == 1) assert_equal(actual, desired) def test_random_integers_deprecated(self): @@ -879,8 +867,8 @@ def test_geometric_exceptions(self): assert_raises(ValueError, random.geometric, [1.1] * 10) assert_raises(ValueError, random.geometric, -0.1) assert_raises(ValueError, random.geometric, [-0.1] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.geometric, np.nan) assert_raises(ValueError, random.geometric, [np.nan] * 10) @@ -1012,7 +1000,7 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, 
random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' assert_no_warnings(random.multivariate_normal, mean, cov, @@ -1023,10 +1011,9 @@ def test_multivariate_normal(self): check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 mu = np.zeros(2) cov = np.eye(2) @@ -1048,8 +1035,8 @@ def test_negative_binomial(self): assert_array_equal(actual, desired) def test_negative_binomial_exceptions(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.negative_binomial, 100, np.nan) assert_raises(ValueError, random.negative_binomial, 100, [np.nan] * 10) @@ -1131,8 +1118,8 @@ def test_poisson_exceptions(self): assert_raises(ValueError, random.poisson, [lamneg] * 10) assert_raises(ValueError, random.poisson, lambig) assert_raises(ValueError, random.poisson, [lambig] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.poisson, np.nan) assert_raises(ValueError, random.poisson, [np.nan] * 10) diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 9787bcbbc101..b70a715237a5 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -7,7 +7,7 @@ import pytest import numpy.exceptions as ex -from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises, assert_warns +from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises def test_numpy_reloading(): @@ -19,14 +19,14 @@ def test_numpy_reloading(): 
VisibleDeprecationWarning = ex.VisibleDeprecationWarning ModuleDeprecationWarning = ex.ModuleDeprecationWarning - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) assert_raises(RuntimeError, reload, numpy._globals) - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index 560ee6143265..7efa2a1d1896 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -34,10 +34,11 @@ def visit_Call(self, node): ast.NodeVisitor.generic_visit(self, node) if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if node.args[0].value == "ignore": - raise AssertionError( - "warnings should have an appropriate stacklevel; " - f"found in {self.__filename} on line {node.lineno}") + if getattr(node.args[0], "value", None) == "ignore": + if not self.__filename.name.startswith("test_"): + raise AssertionError( + "ignore filters should only be used in tests; " + f"found in {self.__filename} on line {node.lineno}") if p.ls[-1] == 'warn' and ( len(p.ls) == 1 or p.ls[-2] == 'warnings'): From 67e5848521b6a857205639be66bb1b29823aa7e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 17:35:39 -0600 Subject: [PATCH 188/294] MAINT: Bump pypa/cibuildwheel from 3.0.0 to 3.0.1 (#29334) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.0 to 3.0.1. 
- [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/5f22145df44122af0f5a201f93cf0207171beca7...95d2f3a92fbf80abe066b09418bbf128a8923df2) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 453a67088adf..ce0c2e803143 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 + - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ab6bbb618899..223ec38898cf 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -175,7 +175,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 + uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From d4c09dfde305cefe328cbc8c952dd4c41836a41c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 8 Jul 2025 16:23:42 +0200 Subject: [PATCH 189/294] TST: Avoid uninitialized values in test (#29341) I think I missed this one before once... can cause occasional warnings if uninitialized. 
Signed-off-by: Sebastian Berg --- numpy/_core/tests/test_ufunc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index b86e22917734..9c5489a614e8 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2129,7 +2129,8 @@ class ArrayPriorityMinus2000(ArrayPriorityBase): assert np.add(y, x) is ArrayPriorityMinus1000 assert np.add(x, xb) is ArrayPriorityMinus1000 assert np.add(xb, x) is ArrayPriorityMinus1000b - assert np.add(np.zeros(2), ArrayPriorityMinus0(2)) is ArrayPriorityMinus0 + y_minus0 = np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 assert type(np.add(xb, x, np.zeros(2))) is np.ndarray @pytest.mark.parametrize("a", ( From 46c0bffa8b137762dee19cbe1541790bd1108fb0 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 8 Jul 2025 19:19:24 +0100 Subject: [PATCH 190/294] TYP: correct default value of `unicode` in `chararray.__new__` (#29340) --- numpy/_core/defchararray.pyi | 14 +++++++++++++- numpy/lib/_polynomial_impl.pyi | 26 ++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 25754965c46a..680875240096 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -114,11 +114,23 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt, + unicode: L[True], + buffer: _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> _CharArray[str_]: ... 
+ @overload def __new__( subtype, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[True] = ..., + *, + unicode: L[True], buffer: _SupportsBuffer = ..., offset: SupportsIndex = ..., strides: _ShapeLike = ..., diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 3da1a2af60c9..3b0eade1399e 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -151,22 +151,44 @@ def polyfit( cov: L[True, "unscaled"] = ..., ) -> _2Tup[NDArray[complex128]]: ... @overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[float64]]: ... +@overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, rcond: float | None = ..., - full: L[True] = ..., + *, + full: L[True], w: _ArrayLikeFloat_co | None = ..., cov: bool | L["unscaled"] = ..., ) -> _5Tup[NDArray[float64]]: ... @overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[complex128]]: ... +@overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, rcond: float | None = ..., - full: L[True] = ..., + *, + full: L[True], w: _ArrayLikeFloat_co | None = ..., cov: bool | L["unscaled"] = ..., ) -> _5Tup[NDArray[complex128]]: ... 
From 2e93c4639f46403b995c910f01e2db33000e854a Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Wed, 9 Jul 2025 01:08:36 -0700 Subject: [PATCH 191/294] DOC: Add missing `self` in `__array_ufunc__` signature (#29343) --- doc/source/user/basics.subclassing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 7b1e8fd34512..a937521a7abb 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -469,7 +469,7 @@ implemented. The signature of ``__array_ufunc__`` is:: - def __array_ufunc__(ufunc, method, *inputs, **kwargs): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - *ufunc* is the ufunc object that was called. - *method* is a string indicating how the Ufunc was called, either From 34776f4c86c3399670144b6b035cb5eaf8a4ec33 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 9 Jul 2025 10:33:27 +0200 Subject: [PATCH 192/294] API,BUG: Fix scalar handling in array-interface allowing NULL pointers (#29338) This fixes the scalar handling in the array-interface and also a bug in the shape handling error path. In theory it breaks code that used NULL to trigger the scalar path, but this path was: 1. Completely undocumented, and 2. correctly triggered by *ommitting* the data field instead. I didn't remove/deprecate the scalar path in this PR, maybe we should. But, I do think we can ignore that theoretical use-case since it is nonsensical. 
Signed-off-by: Sebastian Berg --- doc/release/upcoming_changes/29338.change.rst | 9 ++++ doc/source/reference/arrays.interface.rst | 18 +++++--- numpy/_core/src/multiarray/ctors.c | 28 ++++++++++--- numpy/_core/tests/test_multiarray.py | 41 ++++++++++++++++++- 4 files changed, 84 insertions(+), 12 deletions(-) create mode 100644 doc/release/upcoming_changes/29338.change.rst diff --git a/doc/release/upcoming_changes/29338.change.rst b/doc/release/upcoming_changes/29338.change.rst new file mode 100644 index 000000000000..64bf188009c8 --- /dev/null +++ b/doc/release/upcoming_changes/29338.change.rst @@ -0,0 +1,9 @@ +``__array_interface__`` with NULL pointer changed +------------------------------------------------- +The array interface now accepts NULL pointers (NumPy will do +its own dummy allocation, though). +Previously, these incorrectly triggered an undocumented +scalar path. +In the unlikely event that the scalar path was actually desired, +you can (for now) achieve the previous behavior via the correct +scalar path by not providing a ``data`` field at all. diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index ebe3f6b68918..75c17060c8fc 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -120,7 +120,7 @@ This approach to the interface consists of the object having an **Default**: ``[('', typestr)]`` - **data** (optional) + **data** A 2-tuple whose first argument is a :doc:`Python integer ` that points to the data-area storing the array contents. @@ -136,15 +136,23 @@ This approach to the interface consists of the object having an This attribute can also be an object exposing the :ref:`buffer interface ` which - will be used to share the data. If this key is not present (or - returns None), then memory sharing will be done - through the buffer interface of the object itself. In this + will be used to share the data. 
If this key is ``None``, then memory sharing + will be done through the buffer interface of the object itself. In this case, the offset key can be used to indicate the start of the buffer. A reference to the object exposing the array interface must be stored by the new object if the memory area is to be secured. - **Default**: ``None`` + .. note:: + Not specifying this field uses a "scalar" path that we may remove in the future + as we are not aware of any users. In this case, NumPy assigns the original object + as a scalar into the array. + + .. versionchanged:: 2.4 + Prior to NumPy 2.4 a ``NULL`` pointer used the undocumented "scalar" path + and was thus usually not accepted (and triggered crashes on some paths). + After NumPy 2.4, ``NULL`` is accepted, although NumPy will create a 1-byte sized + new allocation for the array. **strides** (optional) Either ``None`` to indicate a C-style contiguous array or diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index f7efe5041ab3..ff30e581cd91 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2141,6 +2141,7 @@ PyArray_FromInterface(PyObject *origin) Py_ssize_t i, n; npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; + int use_scalar_assign = 0; if (PyArray_LookupSpecial_OnInstance( origin, npy_interned_str.array_interface, &iface) < 0) { @@ -2229,12 +2230,10 @@ PyArray_FromInterface(PyObject *origin) /* Shape must be specified when 'data' is specified */ int result = PyDict_ContainsString(iface, "data"); if (result < 0) { - Py_DECREF(attr); return NULL; } else if (result == 1) { Py_DECREF(iface); - Py_DECREF(attr); PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ shape"); return NULL; @@ -2277,7 +2276,10 @@ PyArray_FromInterface(PyObject *origin) } /* Case for data access through pointer */ - if (attr && PyTuple_Check(attr)) { + if (attr == NULL) { + use_scalar_assign = 1; + } + else if 
(PyTuple_Check(attr)) { PyObject *dataptr; if (PyTuple_GET_SIZE(attr) != 2) { PyErr_SetString(PyExc_TypeError, @@ -2309,7 +2311,7 @@ PyArray_FromInterface(PyObject *origin) } /* Case for data access through buffer */ - else if (attr) { + else { if (attr != Py_None) { base = attr; } @@ -2366,18 +2368,32 @@ PyArray_FromInterface(PyObject *origin) if (ret == NULL) { goto fail; } - if (data == NULL) { + if (use_scalar_assign) { + /* + * NOTE(seberg): I honestly doubt anyone is using this scalar path and we + * could probably just deprecate (or just remove it in a 3.0 version). + */ if (PyArray_SIZE(ret) > 1) { PyErr_SetString(PyExc_ValueError, "cannot coerce scalar to array with size > 1"); Py_DECREF(ret); goto fail; } - if (PyArray_SETITEM(ret, PyArray_DATA(ret), origin) < 0) { + if (PyArray_Pack(PyArray_DESCR(ret), PyArray_DATA(ret), origin) < 0) { Py_DECREF(ret); goto fail; } } + else if (data == NULL && PyArray_NBYTES(ret) != 0) { + /* Caller should ensure this, but <2.4 used the above scalar coerction path */ + PyErr_SetString(PyExc_ValueError, + "data is NULL but array contains data, in older versions of NumPy " + "this may have used the scalar path. 
To get the scalar path " + "you must leave the data field undefined."); + Py_DECREF(ret); + goto fail; + } + result = PyDict_GetItemStringRef(iface, "strides", &attr); if (result == -1){ return NULL; diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 930c736c6076..04222025883e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8873,6 +8873,8 @@ def __array_interface__(self): (f, {'strides': ()}, 0.5), (f, {'strides': (2,)}, ValueError), (f, {'strides': 16}, TypeError), + # This fails due to going into the buffer protocol path + (f, {'data': None, 'shape': ()}, TypeError), ]) def test_scalar_interface(self, val, iface, expected): # Test scalar coercion within the array interface @@ -8891,13 +8893,50 @@ def test_scalar_interface(self, val, iface, expected): post_cnt = sys.getrefcount(np.dtype('f8')) assert_equal(pre_cnt, post_cnt) -def test_interface_no_shape(): + +def test_interface_empty_shape(): class ArrayLike: array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) +def test_interface_no_shape_error(): + class ArrayLike: + __array_interface__ = {"data": None, "typestr": "f8"} + + with pytest.raises(ValueError, match="Missing __array_interface__ shape"): + np.array(ArrayLike()) + + +@pytest.mark.parametrize("iface", [ + {"typestr": "f8", "shape": (0, 1)}, + {"typestr": "(0,)f8,", "shape": (1, 3)}, +]) +def test_interface_nullptr(iface): + iface.update({"data": (0, True)}) + + class ArrayLike: + __array_interface__ = iface + + arr = np.asarray(ArrayLike()) + # Note, we currently set the base anyway, but we do an allocation + # (because NumPy doesn't like NULL data pointers everywhere). 
+ assert arr.shape == iface["shape"] + assert arr.dtype == np.dtype(iface["typestr"]) + assert arr.base is not None + assert arr.flags.owndata + + +def test_interface_nullptr_size_check(): + # Note that prior to NumPy 2.4 the below took the scalar path (if shape had size 1) + class ArrayLike: + __array_interface__ = {"data": (0, True), "typestr": "f8", "shape": ()} + + with pytest.raises(ValueError, match="data is NULL but array contains data"): + np.array(ArrayLike()) + + def test_array_interface_itemsize(): # See gh-6361 my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], From 4ff1cc12479355ade530ae3add7a19e5db69f2ec Mon Sep 17 00:00:00 2001 From: Toshaksha <147024929+Toshaksha@users.noreply.github.com> Date: Wed, 9 Jul 2025 17:17:04 +0530 Subject: [PATCH 193/294] DOC: Fix Resolution Links in NEPs 38 and 49 (#29347) * Update nep-0049-data-allocation-strategies.rst * Update nep-0038-SIMD-optimizations.rst --- doc/neps/nep-0038-SIMD-optimizations.rst | 2 +- doc/neps/nep-0049-data-allocation-strategies.rst | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/neps/nep-0038-SIMD-optimizations.rst b/doc/neps/nep-0038-SIMD-optimizations.rst index eb1157342948..445c008a76c3 100644 --- a/doc/neps/nep-0038-SIMD-optimizations.rst +++ b/doc/neps/nep-0038-SIMD-optimizations.rst @@ -8,7 +8,7 @@ NEP 38 — Using SIMD optimization instructions for performance :Status: Final :Type: Standards :Created: 2019-11-25 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB/#PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB +:Resolution: `NumPy Discussion `_ Abstract diff --git a/doc/neps/nep-0049-data-allocation-strategies.rst b/doc/neps/nep-0049-data-allocation-strategies.rst index 180cfea17156..ec18f7a315d9 100644 --- a/doc/neps/nep-0049-data-allocation-strategies.rst +++ b/doc/neps/nep-0049-data-allocation-strategies.rst @@ -8,8 +8,7 @@ NEP 49 — Data allocation strategies :Status: Final :Type: Standards 
Track :Created: 2021-04-18 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/YZ3PNTXZUT27B6ITFAD3WRSM3T3SRVK4/#PKYXCTG4R5Q6LIRZC4SEWLNBM6GLRF26 - +:Resolution: `NumPy Discussion `_ Abstract -------- From 7f8e3fb168a833e81b8ae5315424106e8f1667cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Wed, 9 Jul 2025 16:28:47 +0200 Subject: [PATCH 194/294] BLD: print long double format used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Print the long double format that is used for the build. This can be helpful when you're about to cross-compile, but can also run Meson first on the native host to check the right value. Signed-off-by: Michał Górny --- numpy/_core/meson.build | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 6098986618e4..1b79aa39781c 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -522,6 +522,7 @@ endif if longdouble_format == 'UNKNOWN' or longdouble_format == 'UNDEFINED' error('Unknown long double format of size: ' + cc.sizeof('long double').to_string()) endif +message(f'Long double format: @longdouble_format@') cdata.set10('HAVE_LDOUBLE_' + longdouble_format, true) if cc.has_header('endian.h') From 8cf1f7dbf583dd7f55906e799a1108d485f54709 Mon Sep 17 00:00:00 2001 From: Carlos Martin Date: Thu, 10 Jul 2025 03:18:44 -0400 Subject: [PATCH 195/294] ENH: Let numpy.size accept multiple axes. (#29240) * Let numpy.size accept multiple axes. 
* Apply suggestions from code review --------- Co-authored-by: Sebastian Berg --- .../upcoming_changes/29240.new_feature.rst | 1 + numpy/_core/fromnumeric.py | 22 ++++++++++++------- numpy/_core/fromnumeric.pyi | 2 +- numpy/_core/tests/test_numeric.py | 4 ++++ 4 files changed, 20 insertions(+), 9 deletions(-) create mode 100644 doc/release/upcoming_changes/29240.new_feature.rst diff --git a/doc/release/upcoming_changes/29240.new_feature.rst b/doc/release/upcoming_changes/29240.new_feature.rst new file mode 100644 index 000000000000..02d43364b200 --- /dev/null +++ b/doc/release/upcoming_changes/29240.new_feature.rst @@ -0,0 +1 @@ +* Let ``np.size`` accept multiple axes. diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 34fe1798f45e..9d01fca8aa32 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2,6 +2,7 @@ """ import functools +import math import types import warnings @@ -3569,10 +3570,13 @@ def size(a, axis=None): ---------- a : array_like Input data. - axis : int, optional - Axis along which the elements are counted. By default, give + axis : None or int or tuple of ints, optional + Axis or axes along which the elements are counted. By default, give the total number of elements. + .. versionchanged:: 2.4 + Extended to accept multiple axes. 
+ Returns ------- element_count : int @@ -3590,10 +3594,12 @@ def size(a, axis=None): >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 - >>> np.size(a,1) + >>> np.size(a,axis=1) 3 - >>> np.size(a,0) + >>> np.size(a,axis=0) 2 + >>> np.size(a,axis=(0,1)) + 6 """ if axis is None: @@ -3602,10 +3608,10 @@ def size(a, axis=None): except AttributeError: return asarray(a).size else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] + _shape = shape(a) + from .numeric import normalize_axis_tuple + axis = normalize_axis_tuple(axis, len(_shape), allow_duplicate=False) + return math.prod(_shape[ax] for ax in axis) def _round_dispatcher(a, decimals=None, out=None): diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 34849c2cc800..95fe7f7d8484 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1397,7 +1397,7 @@ def cumulative_prod( def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: int | None = ...) -> int: ... +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = ...) -> int: ... @overload def around( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 4ead2fc7ec6f..560d7cf71543 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -291,6 +291,10 @@ def test_size(self): assert_(np.size(A) == 6) assert_(np.size(A, 0) == 2) assert_(np.size(A, 1) == 3) + assert_(np.size(A, ()) == 1) + assert_(np.size(A, (0,)) == 2) + assert_(np.size(A, (1,)) == 3) + assert_(np.size(A, (0, 1)) == 6) def test_squeeze(self): A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] From f68522879dd5cd3f6eb45fd3ec971561e76915c6 Mon Sep 17 00:00:00 2001 From: Mark Ryan Date: Thu, 10 Jul 2025 09:52:26 +0000 Subject: [PATCH 196/294] BUG: fix test_npy_uintp_type_enum test_npy_uintp_type_enum will fail if it is run before any of the other tests in test_cython.py. 
The reason is that although it imports the checks module, it does not contain the install_temp parameter that ensures that the checks module is compiled. Running test_npy_uintp_type_enum before any of the other tests in test_cython.py will result in a No module named 'checks' error. If the test is run after one of the other tests in test_cython.py that do contain the install_temp parameter, test_npy_uintp_type_enum will run fine. Here we fix the issue by adding the install_temp parameter to test_npy_uintp_type_enum. Closes #29354 --- numpy/_core/tests/test_cython.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 2c7b40c5614c..c405a59e535e 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -347,6 +347,6 @@ def test_npystring_allocators_other_dtype(install_temp): @pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') -def test_npy_uintp_type_enum(): +def test_npy_uintp_type_enum(install_temp): import checks assert checks.check_npy_uintp_type_enum() From 82bd6ef31ce57859280be7591c3a20f562e69d34 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Fri, 11 Jul 2025 10:16:59 -0400 Subject: [PATCH 197/294] BUG: Fix reference leakage for output arrays in reduction functions (#29358) Add back missing DECREF for `out=` in ufunc.reduce (and etc.) when `out` is passed. 
* BUG: add Py_XDECREF to `out` for failure case in generic reduction * TST: add reference count checks for `out` in reduction leak tests --- numpy/_core/src/umath/ufunc_object.c | 4 +++ numpy/_core/tests/test_ufunc.py | 39 ++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 485364af1ff2..fd9ae9f1e41d 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -3727,6 +3727,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (ret == NULL) { goto fail; } + + Py_XDECREF(out); Py_DECREF(signature[0]); Py_DECREF(signature[1]); @@ -3753,6 +3755,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, return wrapped_result; fail: + Py_XDECREF(out); + Py_XDECREF(signature[0]); Py_XDECREF(signature[1]); Py_XDECREF(signature[2]); diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 9c5489a614e8..84e31bd1bc3c 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -3003,6 +3003,45 @@ def test_reduce_casterrors(offset): assert out[()] < value * offset +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_reduction_no_reference_leak(): + # Test that the generic reduction does not leak references. 
+ # gh-29358 + arr = np.array([1, 2, 3], dtype=np.int32) + count = sys.getrefcount(arr) + + np.add.reduce(arr, dtype=np.int32, initial=0) + assert count == sys.getrefcount(arr) + + np.add.accumulate(arr, dtype=np.int32) + assert count == sys.getrefcount(arr) + + np.add.reduceat(arr, [0, 1], dtype=np.int32) + assert count == sys.getrefcount(arr) + + # with `out=` the reference count is not changed + out = np.empty((), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduce(arr, dtype=np.int32, out=out, initial=0) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty(arr.shape, dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.accumulate(arr, dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty((2,), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + def test_object_reduce_cleanup_on_failure(): # Test cleanup, including of the initial value (manually provided or not) with pytest.raises(TypeError): From 031377b7bd547c3e07702cf273cf9fecd8cb46fa Mon Sep 17 00:00:00 2001 From: Dillon Niederhut Date: Sun, 13 Jul 2025 06:56:44 -0500 Subject: [PATCH 198/294] TST: refactor typing check for @ (#29364) Co-authored-by: Joren Hammudoglu --- numpy/typing/tests/data/pass/simple.py | 4 +++- numpy/typing/tests/data/pass/simple_py3.py | 6 ------ 2 files changed, 3 insertions(+), 7 deletions(-) delete mode 100644 numpy/typing/tests/data/pass/simple_py3.py diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 8f44e6e76f83..408d2482b0ef 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -165,4 +165,6 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: ~array # Other methods -np.array([1, 
2]).transpose() +array.transpose() + +array @ array diff --git a/numpy/typing/tests/data/pass/simple_py3.py b/numpy/typing/tests/data/pass/simple_py3.py deleted file mode 100644 index c05a1ce612ac..000000000000 --- a/numpy/typing/tests/data/pass/simple_py3.py +++ /dev/null @@ -1,6 +0,0 @@ -import numpy as np - -array = np.array([1, 2]) - -# The @ operator is not in python 2 -array @ array From 3ccbc6c29bb56795d32ee685bccdc1e9ca8ae47f Mon Sep 17 00:00:00 2001 From: Iason Krommydas Date: Sun, 13 Jul 2025 08:28:40 -0700 Subject: [PATCH 199/294] DOC: specify that `numpy.nan_to_num` supports array like arguments (#29362) * DOC: specify that numpy.nan_to_num supports array_like arguments * DOC: split examples into multiple lines * DOC: more consistent type specification --- numpy/lib/_type_check_impl.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 977609caa299..584088cdc21d 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -398,15 +398,15 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. - nan : int, float, optional - Value to be used to fill NaN values. If no value is passed + nan : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill NaN values. If no values are passed then NaN values will be replaced with 0.0. - posinf : int, float, optional - Value to be used to fill positive infinity values. If no value is + posinf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill positive infinity values. If no values are passed then positive infinity values will be replaced with a very large number. - neginf : int, float, optional - Value to be used to fill negative infinity values. 
If no value is + neginf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill negative infinity values. If no values are passed then negative infinity values will be replaced with a very small (or negative) number. @@ -445,6 +445,12 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, 1.2800000e+02]) + >>> nan = np.array([11, 12, -9999, 13, 14]) + >>> posinf = np.array([33333333, 11, 12, 13, 14]) + >>> neginf = np.array([11, 33333333, 12, 13, 14]) + >>> np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, + 1.2800000e+02]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) @@ -454,6 +460,11 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): 0.00000000e+000 +1.79769313e+308j]) >>> np.nan_to_num(y, nan=111111, posinf=222222) array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) + >>> nan = np.array([11, 12, 13]) + >>> posinf = np.array([21, 22, 23]) + >>> neginf = np.array([31, 32, 33]) + >>> np.nan_to_num(y, nan=nan, posinf=posinf, neginf=neginf) + array([21.+11.j, 12. +0.j, 13.+23.j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type From 08916dc1f34de32ccce6b1018613de98c3cc68ff Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Sun, 13 Jul 2025 11:35:49 -0700 Subject: [PATCH 200/294] BUG: Fix np.testing utils failing for masked scalar vs. scalar (#29317) (#29318) * TST: Add failing test TestArrayEqual.test_masked_scalar The two added test cases fail with: E AssertionError: E Arrays are not equal E E nan location mismatch: E ACTUAL: MaskedArray(3.) E DESIRED: array(3.) 
and E AssertionError: E Arrays are not equal E E nan location mismatch: E ACTUAL: MaskedArray(3.) E DESIRED: array(nan) * BUG: Fix np.testing utils failing for masked scalar vs. scalar (#29317) TestArrayEqual.test_masked_scalar now passes. This case regressed since 73151451437 (merged in #12119) due to: - ` == ` returning np.ma.masked (not a 0-dim masked bool array), followed by - `np.bool(np.ma.masked)` unintentionally converting it to np._False Note on the modified comment: Confusingly, "isinstance(..., bool) checks" in the previous wording actually incorrectly referred to the ones towards the end of the function, which are not actually related to __eq__'s behavior but to the possibility of `func` returning a bool. * MNT: Improve comments on assert_array_compare nan/inf handling logic - Use same language as elsewhere below to explain `!= True` used to handle np.ma.masked - Clarify committed to support standard MaskedArrays - Restore note lost in 73151451437 comment changes about how the np.bool casts towards the end of the function handle np.ma.masked, and expand further. * TST: Expand TestArrayEqual.test_masked_scalar --- numpy/testing/_private/utils.py | 19 +++++++++++------ numpy/testing/tests/test_utils.py | 34 +++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 78f9e9a004b1..e868239e62ab 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -771,16 +771,20 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): y_id = func(y) # We include work-arounds here to handle three types of slightly # pathological ndarray subclasses: - # (1) all() on `masked` array scalars can return masked arrays, so we - # use != True + # (1) all() on fully masked arrays returns np.ma.masked, so we use != True + # (np.ma.masked != True evaluates as np.ma.masked, which is falsy). 
# (2) __eq__ on some ndarray subclasses returns Python booleans - # instead of element-wise comparisons, so we cast to np.bool() and - # use isinstance(..., bool) checks + # instead of element-wise comparisons, so we cast to np.bool() in + # that case (or in case __eq__ returns some other value with no + # all() method). # (3) subclasses with bare-bones __array_function__ implementations may # not implement np.all(), so favor using the .all() method - # We are not committed to supporting such subclasses, but it's nice to + # We are not committed to supporting cases (2) and (3), but it's nice to # support them if possible. - if np.bool(x_id == y_id).all() != True: + result = x_id == y_id + if not hasattr(result, "all") or not callable(result.all): + result = np.bool(result) + if result.all() != True: msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -790,6 +794,9 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): raise AssertionError(msg) # If there is a scalar, then here we know the array has the same # flag as it everywhere, so we should return the scalar flag. + # np.ma.masked is also handled and converted to np.False_ (even if the other + # array has nans/infs etc.; that's OK given the handling later of fully-masked + # results). if isinstance(x_id, bool) or x_id.ndim == 0: return np.bool(x_id) elif isinstance(y_id, bool) or y_id.ndim == 0: diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index b09df821680b..243b8d420936 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -197,6 +197,40 @@ def test_masked_nan_inf(self): self._test_equal(a, b) self._test_equal(b, a) + # Also provides test cases for gh-11121 + def test_masked_scalar(self): + # Test masked scalar vs. 
plain/masked scalar + for a_val, b_val, b_masked in itertools.product( + [3., np.nan, np.inf], + [3., 4., np.nan, np.inf, -np.inf], + [False, True], + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=True) if b_masked else np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. plain array + for a_val, b_val in itertools.product( + [3., np.nan, -np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. masked array + for a_val, b_val, b_mask in itertools.product( + [3., np.nan, np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + itertools.product([False, True], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=b_mask) + self._test_equal(a, b) + self._test_equal(b, a) + def test_subclass_that_overrides_eq(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having From 81be692aacdae64962b0c847ba25c6db6ca7111b Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 14 Jul 2025 11:14:24 +0300 Subject: [PATCH 201/294] BLD: use github to build macos-arm64 wheels with OpenBLAS and update to 0.3.30 --- .cirrus.star | 18 ----- .github/workflows/wheels.yml | 5 +- requirements/ci32_requirements.txt | 3 +- requirements/ci_requirements.txt | 6 +- tools/ci/cirrus_wheels.yml | 118 ----------------------------- 5 files changed, 5 insertions(+), 145 deletions(-) delete mode 100644 tools/ci/cirrus_wheels.yml diff --git a/.cirrus.star b/.cirrus.star index c503f25720a7..3de5ce97b0e8 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -9,17 +9,12 @@ load("cirrus", "env", "fs", "http") def main(ctx): ###################################################################### - # Should wheels be built? 
# Only test on the numpy/numpy repository ###################################################################### if env.get("CIRRUS_REPO_FULL_NAME") != "numpy/numpy": return [] - # only run the wheels entry on a cron job - if env.get("CIRRUS_CRON", "") == "nightly": - return fs.read("tools/ci/cirrus_wheels.yml") - # Obtain commit message for the event. Unfortunately CIRRUS_CHANGE_MESSAGE # only contains the actual commit message on a non-PR trigger event. # For a PR event it contains the PR title and description. @@ -31,23 +26,10 @@ def main(ctx): if "[skip cirrus]" in commit_msg or "[skip ci]" in commit_msg: return [] - wheel = False labels = env.get("CIRRUS_PR_LABELS", "") pr_number = env.get("CIRRUS_PR", "-1") tag = env.get("CIRRUS_TAG", "") - if "[wheel build]" in commit_msg: - wheel = True - - # if int(pr_number) > 0 and ("14 - Release" in labels or "36 - Build" in labels): - # wheel = True - - if tag.startswith("v") and "dev0" not in tag: - wheel = True - - if wheel: - return fs.read("tools/ci/cirrus_wheels.yml") - if int(pr_number) < 0: return [] diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 223ec38898cf..fd2047283a1f 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -85,10 +85,9 @@ jobs: - [ubuntu-22.04-arm, manylinux_aarch64, ""] - [ubuntu-22.04-arm, musllinux_aarch64, ""] - [macos-13, macosx_x86_64, openblas] - - # targeting macos >= 14. 
Could probably build on macos-14, but it would be a cross-compile - [macos-13, macosx_x86_64, accelerate] - - [macos-14, macosx_arm64, accelerate] # always use accelerate + - [macos-14, macosx_arm64, openblas] + - [macos-14, macosx_arm64, accelerate] - [windows-2022, win_amd64, ""] - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 74c9a51ec111..87831586e01e 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,4 +1,3 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' -scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' +scipy-openblas32==0.3.30.0.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index b6ea06c812c8..dd16787923e3 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,6 +1,4 @@ spin==0.13 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' -scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' -# Note there is not yet a win-arm64 wheel, so we currently only exclude win-arm64 -scipy-openblas64==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.30.0.1 +scipy-openblas64==0.3.30.0.1 diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml deleted file mode 100644 index 6d02411df2e9..000000000000 --- a/tools/ci/cirrus_wheels.yml +++ /dev/null @@ -1,118 +0,0 @@ -###################################################################### -# Build macosx_arm64 natively -# -# macosx_arm64 for macos >= 14 used to be built here, but are now -# built on GHA. 
-###################################################################### - -macosx_arm64_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - env: - CIRRUS_CLONE_SUBMODULES: true - macos_instance: - matrix: - image: ghcr.io/cirruslabs/macos-runner:sonoma - - matrix: - - env: - CIBW_BUILD: cp311-* cp312* cp313* - env: - PATH: /usr/local/lib:/usr/local/include:$PATH - CIBW_ARCHS: arm64 - - build_script: | - brew install micromamba gfortran - micromamba shell init -s bash --root-prefix ~/micromamba - source ~/.bash_profile - - micromamba create -n numpydev - micromamba activate numpydev - micromamba install -y -c conda-forge python=3.11 2>/dev/null - - # Use scipy-openblas wheels - export INSTALL_OPENBLAS=true - export CIBW_ENVIRONMENT_MACOS="MACOSX_DEPLOYMENT_TARGET='11.0' INSTALL_OPENBLAS=true RUNNER_OS=macOS PKG_CONFIG_PATH=$PWD/.openblas" - - # needed for submodules - git submodule update --init - # need to obtain all the tags so setup.py can determine FULLVERSION - git fetch origin - uname -m - python -c "import platform;print(platform.python_version());print(platform.system());print(platform.machine())" - clang --version - - python -m pip install cibuildwheel - cibuildwheel - - wheels_artifacts: - path: "wheelhouse/*" - -###################################################################### -# Upload all wheels -###################################################################### - -wheels_upload_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - # Artifacts don't seem to be persistent from task to task. - # Rather than upload wheels at the end of each cibuildwheel run we do a - # final upload here. This is because a run may be on different OS for - # which bash, etc, may not be present. 
- depends_on: - - macosx_arm64 - compute_engine_instance: - image_project: cirrus-images - image: family/docker-builder - platform: linux - cpu: 1 - - env: - NUMPY_STAGING_UPLOAD_TOKEN: ENCRYPTED[!5a69522ae0c2af9edb2bc1cdfeaca6292fb3666d9ecd82dca0615921834a6ce3b702352835d8bde4ea2a9ed5ef8424ac!] - NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[4376691390321cd5e76613ec21de8456cc0af0164971dd9542f985a017dc30ccb4d40e60f59184618e2d55afd63e93b7] - - upload_script: | - apt-get update - apt-get install -y curl wget - export IS_SCHEDULE_DISPATCH="false" - export IS_PUSH="false" - - # cron job - if [[ "$CIRRUS_CRON" == "nightly" ]]; then - export IS_SCHEDULE_DISPATCH="true" - fi - - # a manual build was started - if [[ "$CIRRUS_BUILD_SOURCE" == "api" && "$CIRRUS_COMMIT_MESSAGE" == "API build for null" ]]; then - export IS_SCHEDULE_DISPATCH="true" - fi - - # only upload wheels to staging if it's a tag beginning with 'v' and you're - # on a maintenance branch - if [[ "$CIRRUS_TAG" == v* ]] && [[ $CIRRUS_TAG != *"dev0"* ]]; then - export IS_PUSH="true" - fi - - if [[ $IS_PUSH == "true" ]] || [[ $IS_SCHEDULE_DISPATCH == "true" ]]; then - # install miniconda in the home directory. For some reason HOME isn't set by Cirrus - export HOME=$PWD - - # install miniconda for uploading to anaconda - wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda3 - $HOME/miniconda3/bin/conda init bash - source $HOME/miniconda3/bin/activate - conda install -y anaconda-client - - # The name of the zip file is derived from the `wheels_artifact` line. 
- # If you change the artifact line to `myfile_artifact` then it would be - # called myfile.zip - - curl https://api.cirrus-ci.com/v1/artifact/build/$CIRRUS_BUILD_ID/wheels.zip --output wheels.zip - unzip wheels.zip - - source ./tools/wheels/upload_wheels.sh - # IS_PUSH takes precedence over IS_SCHEDULE_DISPATCH - set_upload_vars - - # Will be skipped if not a push/tag/scheduled build - upload_wheels - fi From 8b43d86afcecf74b51d2a96425a64363b207e4a1 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 14 Jul 2025 01:17:55 -0700 Subject: [PATCH 202/294] BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 6 +++--- numpy/_core/tests/test_strings.py | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index b0181d4186c9..ca574f605c1a 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1736,7 +1736,7 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, size_t num_codepoints = inbuf.num_codepoints(); npy_intp width = (npy_intp)*(npy_int64*)in2; - if (num_codepoints > (size_t)width) { + if ((npy_intp)num_codepoints > width) { width = num_codepoints; } @@ -1866,8 +1866,8 @@ zfill_strided_loop(PyArrayMethod_Context *context, { Buffer inbuf((char *)is.buf, is.size); size_t in_codepoints = inbuf.num_codepoints(); - size_t width = (size_t)*(npy_int64 *)in2; - if (in_codepoints > width) { + npy_intp width = (npy_intp)*(npy_int64*)in2; + if ((npy_intp)in_codepoints > width) { width = in_codepoints; } // number of leading one-byte characters plus the size of the diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 1b77a535eee6..756c6e1bb549 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -846,6 +846,7 @@ def 
test_rjust_raises_multiple_character_fill(self, dt): ('abc', 6, ' ', ' abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '***abc****'), ]) def test_center(self, buf, width, fillchar, res, dt): @@ -859,6 +860,7 @@ def test_center(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', 'abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', 'abc*******'), ]) def test_ljust(self, buf, width, fillchar, res, dt): @@ -872,6 +874,7 @@ def test_ljust(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', ' abc'), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '*******abc'), ]) def test_rjust(self, buf, width, fillchar, res, dt): @@ -893,6 +896,7 @@ def test_rjust(self, buf, width, fillchar, res, dt): ('-0123', 5, '-0123'), ('000', 3, '000'), ('34', 1, '34'), + ('34', -1, '34'), ('0034', 4, '0034'), ]) def test_zfill(self, buf, width, res, dt): From 9173e548de3541077a9b50b0f4dff7eceb9354fd Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 14 Jul 2025 12:46:38 +0200 Subject: [PATCH 203/294] MNT: Options to catch more issues reported by pytest (#28983) Apply Repo-Review suggestions PP306, PP307, PP308. Skip PP305 for now. 
--- pytest.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest.ini b/pytest.ini index 132af0bb78ab..b8a1da2b4ec6 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -l +addopts = -l -ra --strict-markers --strict-config norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators numpy/_core/src/common/pythoncapi-compat doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES junit_family=xunit2 From 0c203ed4d6ccfb44efef41e87fe5819dbeb7d822 Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Mon, 14 Jul 2025 18:14:31 +0100 Subject: [PATCH 204/294] DEV: remove "packages" from `.gitignore` not necessary now (and can interfere with niche build circumstances https://github.com/prefix-dev/pixi-build-backends/issues/243) --- .gitignore | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/.gitignore b/.gitignore index c4de68c1a9a7..b54de4091bf3 100644 --- a/.gitignore +++ b/.gitignore @@ -43,23 +43,6 @@ GTAGS *.so *.mod -# Packages # -############ -# it's better to unpack these files and commit the raw source -# git has its own built in compression methods -*.7z -*.bz2 -*.bzip2 -*.dmg -*.gz -*.iso -*.jar -*.rar -*.tar -*.tbz2 -*.tgz -*.zip - # Python files # ################ # meson build/installation directories From 7bb8b9f3df8661d9fbea05b55f00717fde10b8c4 Mon Sep 17 00:00:00 2001 From: Sarang Joshi Date: Mon, 14 Jul 2025 12:18:12 -0500 Subject: [PATCH 205/294] Fix typo in npy_cpu_dispatch.c --- numpy/_core/src/common/npy_cpu_dispatch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index ff22f234a7c6..47b8d2b6f4c2 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -9,7 +9,7 @@ NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) { if (npy_static_pydata.cpu_dispatch_registry != NULL) { - 
PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized"); + PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initialized"); return -1; } PyObject *mod_dict = PyModule_GetDict(mod); From 9a0086b8d22822e62db08d04add2d2c568346137 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 14 Jul 2025 23:52:44 +0100 Subject: [PATCH 206/294] TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray`` for ``unicode`` argument (#29377) --- numpy/_core/defchararray.pyi | 70 +++++++++++++++++++++---- numpy/typing/tests/data/reveal/char.pyi | 6 +++ 2 files changed, 66 insertions(+), 10 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 680875240096..d3e01c2c820a 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1044,14 +1044,15 @@ def startswith( def str_len(A: UST_co) -> NDArray[int_]: ... # Overload 1 and 2: str- or bytes-based array-likes -# overload 3: arbitrary object with unicode=False (-> bytes_) -# overload 4: arbitrary object with unicode=True (-> str_) +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) @overload def array( obj: U_co, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + unicode: L[True] | None = ..., order: _OrderKACF = ..., ) -> _CharArray[str_]: ... @overload @@ -1059,7 +1060,15 @@ def array( obj: S_co, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... 
@overload @@ -1067,43 +1076,84 @@ def array( obj: object, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + *, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload def array( obj: object, itemsize: int | None = ..., copy: bool = ..., - unicode: L[True] = ..., + *, + unicode: L[True], order: _OrderKACF = ..., ) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload def asarray( obj: U_co, itemsize: int | None = ..., - unicode: L[False] = ..., + unicode: L[True] | None = ..., order: _OrderKACF = ..., ) -> _CharArray[str_]: ... @overload def asarray( obj: S_co, itemsize: int | None = ..., - unicode: L[False] = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, itemsize: int | None = ..., - unicode: L[False] = ..., + *, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload def asarray( obj: object, itemsize: int | None = ..., - unicode: L[True] = ..., + *, + unicode: L[True], order: _OrderKACF = ..., ) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... 
diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 9fdc9f61e893..5c6af73888d0 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -209,6 +209,9 @@ assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, n assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) @@ -216,3 +219,6 @@ assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[n assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) From d32370539ea5aa93100842a98cf21d55667b6041 Mon Sep 17 00:00:00 
2001 From: Ben Woodruff Date: Tue, 15 Jul 2025 11:42:04 -0700 Subject: [PATCH 207/294] BUG: Fix repeatability issues in test suite (#29380) Resolves test failures caused by persistent global state when running numpy.test() twice in the same Python session. - core/tests: Skip TestAddNewdocUfunc on repeated runs. This avoids C-level mutation errors. - f2py/tests: Reset f77modulename at end of callcrackfortran. This change alters the code base. - lib/tests: Wrap StringIO parameters in lambdas. - f2py/tests/test_docs: Reset ftype.data.a at end of test_ftype. These changes ensure that the full test suite can be run multiple times in the same interpreter without failure. Closes #26718. --- numpy/_core/tests/test_deprecations.py | 7 +++++++ numpy/f2py/f2py2e.py | 2 ++ numpy/f2py/tests/test_docs.py | 2 ++ numpy/lib/tests/test_io.py | 8 ++++++-- 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index b83a2ac610ee..d4f2e9984266 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -448,6 +448,13 @@ def test_deprecated(self): class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 def test_deprecated(self): + doc = struct_ufunc.add_triplet.__doc__ + # gh-26718 + # This test mutates the C-level docstring pointer for add_triplet, + # which is permanent once set. Skip when re-running tests. 
+ if doc is not None and "new docs" in doc: + pytest.skip("Cannot retest deprecation, otherwise ValueError: " + "Cannot change docstring of ufunc with non-NULL docstring") self.assert_deprecated( lambda: np._core.umath._add_newdoc_ufunc( struct_ufunc.add_triplet, "new docs" diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 459299f8e127..84a5aa3c20a6 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -378,6 +378,8 @@ def callcrackfortran(files, options): mod['gil_used'] = 'Py_MOD_GIL_USED' else: mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' + # gh-26718 Reset global + crackfortran.f77modulename = '' return postlist diff --git a/numpy/f2py/tests/test_docs.py b/numpy/f2py/tests/test_docs.py index 5d9aaac9f15b..7015af3b2627 100644 --- a/numpy/f2py/tests/test_docs.py +++ b/numpy/f2py/tests/test_docs.py @@ -60,5 +60,7 @@ def test_ftype(self): ftype.data.x[1] = 45 assert_array_equal(ftype.data.x, np.array([1, 45, 3], dtype=np.float32)) + # gh-26718 Cleanup for repeated test runs + ftype.data.a = 0 # TODO: implement test methods for other example Fortran codes diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index a1698f240bff..b8c8e6cfcac1 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1276,12 +1276,16 @@ def test_max_rows_larger(self): (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]), # "Bad" lines that do not end in newlines: (1, ["ignored", "1,2", "", "3,4"]), - (1, StringIO("ignored\n1,2\n\n3,4")), + (1, lambda: StringIO("ignored\n1,2\n\n3,4")), # Same as above, but do not skip any lines: (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]), (0, ["-1,0", "1,2", "", "3,4"]), - (0, StringIO("-1,0\n1,2\n\n3,4"))]) + (0, lambda: StringIO("-1,0\n1,2\n\n3,4"))]) def test_max_rows_empty_lines(self, skip, data): + # gh-26718 re-instantiate StringIO objects each time + if callable(data): + data = data() + with pytest.warns(UserWarning, match=f"Input line 3.*max_rows={3 - skip}"): res = np.loadtxt(data, dtype=int, 
skiprows=skip, delimiter=",", From f37ffca5c734e6c4ae29109a1889429f1f59805b Mon Sep 17 00:00:00 2001 From: Verney7 Date: Wed, 16 Jul 2025 14:33:20 +0800 Subject: [PATCH 208/294] BLD: Add sw_64 support --- numpy/_core/include/numpy/npy_cpu.h | 3 +++ numpy/_core/include/numpy/npy_endian.h | 1 + 2 files changed, 4 insertions(+) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 52e9d5996bd1..d3a29da57f36 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -20,6 +20,7 @@ * NPY_CPU_RISCV64 * NPY_CPU_RISCV32 * NPY_CPU_LOONGARCH + * NPY_CPU_SW_64 * NPY_CPU_WASM */ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ @@ -111,6 +112,8 @@ #endif #elif defined(__loongarch_lp64) #define NPY_CPU_LOONGARCH64 +#elif defined(__sw_64__) + #define NPY_CPU_SW_64 #elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ /* __wasm__ is defined by clang when targeting wasm */ diff --git a/numpy/_core/include/numpy/npy_endian.h b/numpy/_core/include/numpy/npy_endian.h index 09262120bf82..ecb4b000763d 100644 --- a/numpy/_core/include/numpy/npy_endian.h +++ b/numpy/_core/include/numpy/npy_endian.h @@ -51,6 +51,7 @@ || defined(NPY_CPU_RISCV64) \ || defined(NPY_CPU_RISCV32) \ || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_SW_64) \ || defined(NPY_CPU_WASM) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN From 685918f57bb2591f1ad14dedc7f2920cc533e4f8 Mon Sep 17 00:00:00 2001 From: Yuki Kobayashi Date: Wed, 16 Jul 2025 21:52:57 +0900 Subject: [PATCH 209/294] DOC: Fix `PyArrayMapIterObject` document (#29386) The structure of `PyArrayMapIterObject` defined in `multiarray/mapping.h`, not in `arrayobject.h`. 
--- doc/source/reference/c-api/types-and-structures.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 3f16b5f4dbc4..fb8efb9c8766 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1608,7 +1608,7 @@ for completeness and assistance in understanding the code. The C-structure associated with :c:var:`PyArrayMapIter_Type`. This structure is useful if you are trying to understand the advanced-index mapping code. It is defined in the - ``arrayobject.h`` header. This type is not exposed to Python and + ``multiarray/mapping.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. From 0fee7d82b176fde51be69fe06fec638768f834f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:33:12 +0100 Subject: [PATCH 210/294] TYP: Type `MaskedArray.{sum,std,var,mean,prod}` (#29381) --- numpy/ma/core.pyi | 152 ++++++++++++++++++- numpy/typing/tests/data/pass/ndarray_misc.py | 4 +- numpy/typing/tests/data/reveal/ma.pyi | 25 +++ 3 files changed, 174 insertions(+), 7 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 63dea396de66..ee3b90f2891d 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1206,7 +1206,34 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = ...) -> _ArrayT: ... - def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... 
+ @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... # Keep in sync with `ndarray.cumsum` @overload # out: None (default) @@ -1216,7 +1243,35 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + product: Any # Keep in sync with `ndarray.cumprod` @@ -1227,7 +1282,33 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + @overload + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... @overload def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... @@ -1236,8 +1317,69 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... - def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... # Keep in sync with `ndarray.round` @overload # out=None (default) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index bb290cdf12f7..b3428f6b48c1 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -119,7 +119,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... i4.std() A.std() A.std(axis=0) -A.std(keepdims=True) +A.std(keepdims=True, mean=0.) A.std(out=B0.astype(np.float64)) i4.sum() @@ -137,7 +137,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... i4.var() A.var() A.var(axis=0) -A.var(keepdims=True) +A.var(keepdims=True, mean=0.) A.var(out=B0) A.argpartition([0]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 7c2cb9d5f05e..b5a292330208 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -743,6 +743,31 @@ assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] +assert_type(MAR_f8.sum(), Any) +assert_type(MAR_f8.sum(axis=0), Any) +assert_type(MAR_f8.sum(keepdims=True), Any) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.std(), Any) +assert_type(MAR_f8.std(axis=0), Any) +assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.var(), Any) +assert_type(MAR_f8.var(axis=0), Any) +assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.mean(), Any) 
+assert_type(MAR_f8.mean(axis=0), Any) +assert_type(MAR_f8.mean(keepdims=True), Any) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.prod(), Any) +assert_type(MAR_f8.prod(axis=0), Any) +assert_type(MAR_f8.prod(keepdims=True), Any) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclass) + # MaskedArray "true" division assert_type(MAR_f8 / b, MaskedArray[np.float64]) From 6e48cff51b4ea3c002ca86ca4835a723338dbf54 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 16 Jul 2025 17:50:07 +0100 Subject: [PATCH 211/294] DOC: document `mean` parameter in ``ndarray.std`` and ``ndarray.var`` (#29387) --- numpy/_core/_add_newdocs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 597d5c6deaf3..90d33d4b810a 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4240,7 +4240,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('std', """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) Returns the standard deviation of the array elements along given axis. @@ -4518,7 +4518,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('var', """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) Returns the variance of the array elements, along given axis. 
From b5d6eb4300f782f67dde893dc72f2dca94945b6c Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:37:09 -0400 Subject: [PATCH 212/294] MNT: add linter for thread-unsafe C API uses (#29371) * MNT: add extra lint - thread-unsafe C API lint * MNT: integrate ruff-lint with C-API-lint --- numpy/_core/include/numpy/npy_3kcompat.h | 2 +- numpy/_core/src/common/npy_cpu_dispatch.c | 2 +- numpy/_core/src/common/ufunc_override.c | 2 +- .../src/multiarray/_multiarray_tests.c.src | 8 +- numpy/_core/src/multiarray/array_coercion.c | 2 +- .../src/multiarray/arrayfunction_override.c | 4 +- numpy/_core/src/multiarray/arraytypes.c.src | 12 +-- numpy/_core/src/multiarray/buffer.c | 2 +- numpy/_core/src/multiarray/compiled_base.c | 2 +- numpy/_core/src/multiarray/conversion_utils.c | 4 +- numpy/_core/src/multiarray/convert_datatype.c | 10 +- numpy/_core/src/multiarray/ctors.c | 2 +- numpy/_core/src/multiarray/descriptor.c | 44 ++++----- numpy/_core/src/multiarray/dtype_transfer.c | 10 +- numpy/_core/src/multiarray/dtype_traversal.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/src/multiarray/hashdescr.c | 2 +- numpy/_core/src/multiarray/iterators.c | 2 +- .../multiarray/legacy_dtype_implementation.c | 6 +- numpy/_core/src/multiarray/mapping.c | 2 +- numpy/_core/src/multiarray/methods.c | 10 +- numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/_core/src/multiarray/nditer_pywrap.c | 2 +- numpy/_core/src/multiarray/refcount.c | 6 +- numpy/_core/src/multiarray/textreading/rows.c | 2 +- numpy/_core/src/multiarray/usertypes.c | 2 +- numpy/_core/src/umath/ufunc_object.c | 8 +- numpy/_core/src/umath/ufunc_type_resolution.c | 6 +- numpy/_core/src/umath/umathmodule.c | 6 +- numpy/f2py/src/fortranobject.c | 8 +- tools/ci/check_c_api_usage.sh | 91 +++++++++++++++++++ tools/linter.py | 28 +++++- 32 files changed, 204 insertions(+), 89 deletions(-) create mode 100644 tools/ci/check_c_api_usage.sh diff --git 
a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h index c2bf74faf09d..cd91f66268c7 100644 --- a/numpy/_core/include/numpy/npy_3kcompat.h +++ b/numpy/_core/include/numpy/npy_3kcompat.h @@ -242,7 +242,7 @@ static inline PyObject* npy_PyFile_OpenFile(PyObject *filename, const char *mode) { PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); // noqa: borrowed-ref OK if (open == NULL) { return NULL; } diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index 47b8d2b6f4c2..2cb3cd817d2a 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -33,7 +33,7 @@ NPY_VISIBILITY_HIDDEN void npy_cpu_dispatch_trace(const char *fname, const char *signature, const char **dispatch_info) { - PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); + PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); // noqa: borrowed-ref OK if (func_dict == NULL) { func_dict = PyDict_New(); if (func_dict == NULL) { diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index e98315f14a94..0bcbea5baa30 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -108,7 +108,7 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * * PySequence_Fast* functions. 
This is required for PyPy */ PyObject *seq; - seq = PySequence_Fast(*out_kwd_obj, + seq = PySequence_Fast(*out_kwd_obj, // noqa: borrowed-ref OK "Could not convert object to sequence"); if (seq == NULL) { Py_CLEAR(*out_kwd_obj); diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 8012a32b070e..068fabc7fee8 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -644,7 +644,7 @@ incref_elide_l(PyObject *dummy, PyObject *args) } /* get item without increasing refcount, item may still be on the python * stack but above the inaccessible top */ - r = PyList_GetItem(arg, 4); + r = PyList_GetItem(arg, 4); // noqa: borrowed-ref OK res = PyNumber_Add(r, r); return res; @@ -863,7 +863,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) if (classes == NULL) { goto fail; } - Py_SETREF(classes, PySequence_Fast(classes, NULL)); + Py_SETREF(classes, PySequence_Fast(classes, NULL)); // noqa: borrowed-ref OK if (classes == NULL) { goto fail; } @@ -883,7 +883,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) PyObject *to_dtype, *cast_obj; Py_ssize_t pos = 0; - while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, + while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, // noqa: borrowed-ref OK &pos, &to_dtype, &cast_obj)) { if (cast_obj == Py_None) { continue; @@ -965,7 +965,7 @@ identityhash_tester(PyObject *NPY_UNUSED(mod), } /* Replace the sequence with a guaranteed fast-sequence */ - sequence = PySequence_Fast(sequence, "converting sequence."); + sequence = PySequence_Fast(sequence, "converting sequence."); // noqa: borrowed-ref OK if (sequence == NULL) { goto finish; } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index ff7d98bd9c64..8271fb6812d1 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ 
b/numpy/_core/src/multiarray/array_coercion.c @@ -1148,7 +1148,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( force_sequence_due_to_char_dtype: /* Ensure we have a sequence (required for PyPy) */ - seq = PySequence_Fast(obj, "Could not convert object to sequence"); + seq = PySequence_Fast(obj, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (seq == NULL) { /* * Specifically do not fail on things that look like a dictionary, diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 9834ab138cf6..72211e2a6d62 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -371,7 +371,7 @@ array__get_implementing_args( return NULL; } - relevant_args = PySequence_Fast( + relevant_args = PySequence_Fast( // noqa: borrowed-ref OK relevant_args, "dispatcher for __array_function__ did not return an iterable"); if (relevant_args == NULL) { @@ -518,7 +518,7 @@ dispatcher_vectorcall(PyArray_ArrayFunctionDispatcherObject *self, fix_name_if_typeerror(self); return NULL; } - Py_SETREF(relevant_args, PySequence_Fast(relevant_args, + Py_SETREF(relevant_args, PySequence_Fast(relevant_args, // noqa: borrowed-ref OK "dispatcher for __array_function__ did not return an iterable")); if (relevant_args == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 9e5588f98a83..52c9bdfb6bcc 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -881,7 +881,7 @@ VOID_getitem(void *input, void *vap) npy_intp offset; PyArray_Descr *new; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); + tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { Py_DECREF(ret); return NULL; @@ -973,7 +973,7 @@ _setup_field(int i, 
_PyArray_LegacyDescr *descr, PyArrayObject *arr, npy_intp offset; key = PyTuple_GET_ITEM(descr->names, i); - tup = PyDict_GetItem(descr->fields, key); + tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { return -1; } @@ -2274,7 +2274,7 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK npy_intp offset; PyArray_Descr *new; if (NPY_TITLE_KEY(key, value)) { @@ -2359,7 +2359,7 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK npy_intp offset; PyArray_Descr * new; @@ -2679,7 +2679,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap) PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK PyArray_Descr * new; npy_intp offset; if (NPY_TITLE_KEY(key, value)) { @@ -3041,7 +3041,7 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) PyArray_Descr *new; npy_intp offset; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); + tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { goto finish; } diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c index fcff3ad6ca74..a6c683f26a8b 100644 --- 
a/numpy/_core/src/multiarray/buffer.c +++ b/numpy/_core/src/multiarray/buffer.c @@ -268,7 +268,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, int ret; name = PyTuple_GET_ITEM(ldescr->names, k); - item = PyDict_GetItem(ldescr->fields, name); + item = PyDict_GetItem(ldescr->fields, name); // noqa: borrowed-ref OK child = (PyArray_Descr*)PyTuple_GetItem(item, 0); offset_obj = PyTuple_GetItem(item, 1); diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index fee0d4a61a78..179c8e404322 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1522,7 +1522,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyTypeObject *new = (PyTypeObject *)obj; _ADDDOC(new->tp_doc, new->tp_name); if (new->tp_dict != NULL && PyDict_CheckExact(new->tp_dict) && - PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { + PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { // noqa: borrowed-ref - manual fix needed /* Warning: Modifying `tp_dict` is not generally safe! */ if (PyDict_SetItemString(new->tp_dict, "__doc__", str) < 0) { return NULL; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 1994dd0ee8f7..d487aa16727d 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -130,7 +130,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) * dimension_from_scalar as soon as possible. */ if (!PyLong_CheckExact(obj) && PySequence_Check(obj)) { - seq_obj = PySequence_Fast(obj, + seq_obj = PySequence_Fast(obj, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer."); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. 
*/ @@ -1135,7 +1135,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) { PyObject *seq_obj = NULL; if (!PyLong_CheckExact(seq) && PySequence_Check(seq)) { - seq_obj = PySequence_Fast(seq, + seq_obj = PySequence_Fast(seq, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer"); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. */ diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 59b6298b5815..d34d852a706b 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -344,7 +344,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) PyObject *cobj; key = PyLong_FromLong(type_num); - cobj = PyDict_GetItem(obj, key); + cobj = PyDict_GetItem(obj, key); // noqa: borrowed-ref OK Py_DECREF(key); if (cobj && PyCapsule_CheckExact(cobj)) { castfunc = PyCapsule_GetPointer(cobj, NULL); @@ -2749,7 +2749,7 @@ nonstructured_to_structured_resolve_descriptors( Py_ssize_t pos = 0; PyObject *key, *tuple; - while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { + while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { // noqa: borrowed-ref OK PyArray_Descr *field_descr = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); npy_intp field_view_off = NPY_MIN_INTP; NPY_CASTING field_casting = PyArray_GetCastInfo( @@ -2898,7 +2898,7 @@ structured_to_nonstructured_resolve_descriptors( return -1; } PyObject *key = PyTuple_GetItem(PyDataType_NAMES(given_descrs[0]), 0); - PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); + PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); // noqa: borrowed-ref OK base_descr = (PyArray_Descr *)PyTuple_GET_ITEM(base_tup, 0); struct_view_offset = PyLong_AsSsize_t(PyTuple_GET_ITEM(base_tup, 1)); if (error_converting(struct_view_offset)) { @@ -3033,7 +3033,7 @@ can_cast_fields_safety( for (Py_ssize_t i 
= 0; i < field_count; i++) { npy_intp field_view_off = NPY_MIN_INTP; PyObject *from_key = PyTuple_GET_ITEM(PyDataType_NAMES(from), i); - PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); + PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); // noqa: borrowed-ref OK if (from_tup == NULL) { return give_bad_field_error(from_key); } @@ -3041,7 +3041,7 @@ can_cast_fields_safety( /* Check whether the field names match */ PyObject *to_key = PyTuple_GET_ITEM(PyDataType_NAMES(to), i); - PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); + PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); // noqa: borrowed-ref OK if (to_tup == NULL) { return give_bad_field_error(from_key); } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index ff30e581cd91..4f466677c57c 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2114,7 +2114,7 @@ _is_default_descr(PyObject *descr, PyObject *typestr) { if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) { return 0; } - PyObject *tuple = PyList_GET_ITEM(descr, 0); + PyObject *tuple = PyList_GET_ITEM(descr, 0); // noqa: borrowed-ref - manual fix needed if (!(PyTuple_Check(tuple) && PyTuple_GET_SIZE(tuple) == 2)) { return 0; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index c32b0e5e3f9f..135deee83c97 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -424,7 +424,7 @@ _convert_from_array_descr(PyObject *obj, int align) return NULL; } for (int i = 0; i < n; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) { PyErr_Format(PyExc_TypeError, "Field elements must be 2- or 3-tuples, got '%R'", @@ -507,10 +507,10 @@ 
_convert_from_array_descr(PyObject *obj, int align) "StringDType is not currently supported for structured dtype fields."); goto fail; } - if ((PyDict_GetItemWithError(fields, name) != NULL) + if ((PyDict_GetItemWithError(fields, name) != NULL) // noqa: borrowed-ref OK || (title && PyUnicode_Check(title) - && (PyDict_GetItemWithError(fields, title) != NULL))) { + && (PyDict_GetItemWithError(fields, title) != NULL))) { // noqa: borrowed-ref OK PyErr_Format(PyExc_ValueError, "field %R occurs more than once", name); Py_DECREF(conv); @@ -548,7 +548,7 @@ _convert_from_array_descr(PyObject *obj, int align) goto fail; } if (PyUnicode_Check(title)) { - PyObject *existing = PyDict_GetItemWithError(fields, title); + PyObject *existing = PyDict_GetItemWithError(fields, title); // noqa: borrowed-ref OK if (existing == NULL && PyErr_Occurred()) { goto fail; } @@ -613,7 +613,7 @@ _convert_from_list(PyObject *obj, int align) * Ignore any empty string at end which _internal._commastring * can produce */ - PyObject *last_item = PyList_GET_ITEM(obj, n-1); + PyObject *last_item = PyList_GET_ITEM(obj, n-1); // noqa: borrowed-ref OK if (PyUnicode_Check(last_item)) { Py_ssize_t s = PySequence_Size(last_item); if (s < 0) { @@ -643,7 +643,7 @@ _convert_from_list(PyObject *obj, int align) int totalsize = 0; for (int i = 0; i < n; i++) { PyArray_Descr *conv = _convert_from_any( - PyList_GET_ITEM(obj, i), align); + PyList_GET_ITEM(obj, i), align); // noqa: borrowed-ref OK if (conv == NULL) { goto fail; } @@ -794,7 +794,7 @@ _validate_union_object_dtype(_PyArray_LegacyDescr *new, _PyArray_LegacyDescr *co if (name == NULL) { return -1; } - tup = PyDict_GetItemWithError(conv->fields, name); + tup = PyDict_GetItemWithError(conv->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -940,7 +940,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = 
PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -960,7 +960,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -1213,7 +1213,7 @@ _convert_from_dict(PyObject *obj, int align) } /* Insert into dictionary */ - if (PyDict_GetItemWithError(fields, name) != NULL) { + if (PyDict_GetItemWithError(fields, name) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "name already used as a name or title"); Py_DECREF(tup); @@ -1232,7 +1232,7 @@ _convert_from_dict(PyObject *obj, int align) } if (len == 3) { if (PyUnicode_Check(title)) { - if (PyDict_GetItemWithError(fields, title) != NULL) { + if (PyDict_GetItemWithError(fields, title) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); Py_DECREF(tup); @@ -1895,7 +1895,7 @@ _convert_from_str(PyObject *obj, int align) if (typeDict == NULL) { goto fail; } - PyObject *item = PyDict_GetItemWithError(typeDict, obj); + PyObject *item = PyDict_GetItemWithError(typeDict, obj); // noqa: borrowed-ref - manual fix needed if (item == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2276,7 +2276,7 @@ _arraydescr_isnative(PyArray_Descr *self) PyArray_Descr *new; int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2422,7 +2422,7 @@ arraydescr_names_set( int ret; key = PyTuple_GET_ITEM(self->names, i); /* Borrowed references to item and new_key */ - item = 
PyDict_GetItemWithError(self->fields, key); + item = PyDict_GetItemWithError(self->fields, key); // noqa: borrowed-ref OK if (item == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -2848,7 +2848,7 @@ _descr_find_object(PyArray_Descr *self) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2964,7 +2964,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) if (fields != Py_None) { PyObject *key, *list; key = PyLong_FromLong(-1); - list = PyDict_GetItemWithError(fields, key); + list = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (!list) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3140,7 +3140,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) for (i = 0; i < PyTuple_GET_SIZE(names); ++i) { name = PyTuple_GET_ITEM(names, i); - field = PyDict_GetItemWithError(fields, name); + field = PyDict_GetItemWithError(fields, name); // noqa: borrowed-ref OK if (!field) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3344,7 +3344,7 @@ PyArray_DescrNewByteorder(PyArray_Descr *oself, char newendian) return NULL; } /* make new dictionary with replaced PyArray_Descr Objects */ - while (PyDict_Next(self->fields, &pos, &key, &value)) { + while (PyDict_Next(self->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -3470,7 +3470,7 @@ is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype) if (key == NULL) { return 0; } - tup = PyDict_GetItem(fields, key); + tup = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { return 0; } @@ -3635,7 +3635,7 @@ _check_has_fields(PyArray_Descr *self) static PyObject * _subscript_by_name(_PyArray_LegacyDescr 
*self, PyObject *op) { - PyObject *obj = PyDict_GetItemWithError(self->fields, op); + PyObject *obj = PyDict_GetItemWithError(self->fields, op); // noqa: borrowed-ref OK if (obj == NULL) { if (!PyErr_Occurred()) { PyErr_Format(PyExc_KeyError, @@ -3672,7 +3672,7 @@ _is_list_of_strings(PyObject *obj) } seqlen = PyList_GET_SIZE(obj); for (i = 0; i < seqlen; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyUnicode_Check(item)) { return NPY_FALSE; } @@ -3716,7 +3716,7 @@ arraydescr_field_subset_view(_PyArray_LegacyDescr *self, PyObject *ind) */ PyTuple_SET_ITEM(names, i, name); - tup = PyDict_GetItemWithError(self->fields, name); + tup = PyDict_GetItemWithError(self->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { PyErr_SetObject(PyExc_KeyError, name); diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 188a55a4b5f5..64d5bfa89e8e 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -2319,7 +2319,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), *out_flags = PyArrayMethod_MINIMAL_FLAGS; for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { PyMem_Free(data); @@ -2383,7 +2383,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), NPY_traverse_info_init(&data->decref_src); key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), 0); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { PyMem_Free(data); @@ 
-2435,14 +2435,14 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), /* set up the transfer function for each field */ for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return NPY_FAIL; } key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); @@ -3831,4 +3831,4 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape, *out_dataC = dataC; *out_ndim = ndim; return 0; -} +} \ No newline at end of file diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 91b1889b7d1f..e86aab7411d4 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -346,7 +346,7 @@ get_fields_traverse_function( int offset; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dtype->fields, key); + tup = PyDict_GetItem(dtype->fields, key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return -1; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 0b1b0fb39192..5ac7ef3b2320 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -693,7 +693,7 @@ void_ensure_canonical(_PyArray_LegacyDescr *self) int maxalign = 1; for (Py_ssize_t i = 0; i < field_num; i++) { PyObject *name = PyTuple_GET_ITEM(self->names, i); - PyObject *tuple = 
PyDict_GetItem(self->fields, name); + PyObject *tuple = PyDict_GetItem(self->fields, name); // noqa: borrowed-ref OK PyObject *new_tuple = PyTuple_New(PyTuple_GET_SIZE(tuple)); PyArray_Descr *field_descr = NPY_DT_CALL_ensure_canonical( (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0)); diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index f570caf1588f..d5359d86390f 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -127,7 +127,7 @@ static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject* * For each field, add the key + descr + offset to l */ key = PyTuple_GET_ITEM(names, pos); - value = PyDict_GetItem(fields, key); + value = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK /* XXX: are those checks necessary ? */ if (value == NULL) { PyErr_SetString(PyExc_SystemError, diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 422c690882ab..d83f0d7a3ac3 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -1489,7 +1489,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, return NULL; } - fast_seq = PySequence_Fast(args, ""); // needed for pypy + fast_seq = PySequence_Fast(args, ""); // needed for pypy // noqa: borrowed-ref - manual fix needed if (fast_seq == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/legacy_dtype_implementation.c b/numpy/_core/src/multiarray/legacy_dtype_implementation.c index 70b4fa1e49db..abfc1bd0e3cd 100644 --- a/numpy/_core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/_core/src/multiarray/legacy_dtype_implementation.c @@ -320,8 +320,8 @@ can_cast_fields(PyObject *field1, PyObject *field2, NPY_CASTING casting) /* Iterate over all the fields and compare for castability */ ppos = 0; - while (PyDict_Next(field1, &ppos, &key, &tuple1)) { - if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { + 
while (PyDict_Next(field1, &ppos, &key, &tuple1)) { // noqa: borrowed-ref OK + if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { // noqa: borrowed-ref OK return 0; } /* Compare the dtype of the field for castability */ @@ -372,7 +372,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, Py_ssize_t ppos = 0; PyObject *tuple; PyArray_Descr *field; - PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); + PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); // noqa: borrowed-ref OK field = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); /* * For a subarray, we need to get the underlying type; diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 7483448e632b..12f25534f1d0 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1351,7 +1351,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) npy_intp offset; /* get the field offset and dtype */ - tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); + tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); // noqa: borrowed-ref OK if (tup == NULL && PyErr_Occurred()) { return 0; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 58a554dc40be..50f7f5f3c73b 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1005,7 +1005,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) if (nin < 0) { return -1; } - fast = PySequence_Fast(args, "Could not convert object to sequence"); + fast = PySequence_Fast(args, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (fast == NULL) { return -1; } @@ -1033,7 +1033,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); + where_obj = PyDict_GetItemWithError(kwds, 
npy_interned_str.where); // noqa: borrowed-ref OK if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; @@ -1115,7 +1115,7 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); return NULL; } - types = PySequence_Fast( + types = PySequence_Fast( // noqa: borrowed-ref - manual fix needed types, "types argument to ndarray.__array_function__ must be iterable"); if (types == NULL) { @@ -1566,7 +1566,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, PyArray_Descr *new; int offset, res; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -1733,7 +1733,7 @@ _setlist_pkl(PyArrayObject *self, PyObject *list) return -1; } while(iter->index < iter->size) { - theobject = PyList_GET_ITEM(list, iter->index); + theobject = PyList_GET_ITEM(list, iter->index); // noqa: borrowed-ref OK setitem(theobject, iter->dataptr, self); PyArray_ITER_NEXT(iter); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e80c6c0cd45c..b8ade23b6d76 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2764,7 +2764,7 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) npy_intp i, size; PyObject *item; - obj = PySequence_Fast(obj, "the subscripts for each operand must " + obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref - manual fix needed "be a list or a tuple"); if (obj == NULL) { return -1; diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 27c392db8720..0af2ee408b4b 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ 
-588,7 +588,7 @@ npyiter_prepare_ops(PyObject *op_in, PyObject **out_owner, PyObject ***out_objs) { /* Take ownership of op_in (either a tuple/list or single element): */ if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); + PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref - manual fix needed if (op_in == NULL) { Py_DECREF(op_in); return -1; diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 571b50372684..ac70f38f39a5 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -184,7 +184,7 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -246,7 +246,7 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -480,7 +480,7 @@ _fill_with_none(char *optr, PyArray_Descr *dtype) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index c459fa826e53..e5a294ecb01d 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -61,7 +61,7 @@ create_conv_funcs( Py_ssize_t pos = 0; int error = 0; Py_BEGIN_CRITICAL_SECTION(converters); - while 
(PyDict_Next(converters, &pos, &key, &value)) { + while (PyDict_Next(converters, &pos, &key, &value)) { // noqa: borrowed-ref OK Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 445f7ad7fe67..9d026f32044f 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -344,7 +344,7 @@ static int _warn_if_cast_exists_already( if (to_DType == NULL) { return -1; } - PyObject *cast_impl = PyDict_GetItemWithError( + PyObject *cast_impl = PyDict_GetItemWithError( // noqa: borrowed-ref OK NPY_DT_SLOTS(NPY_DTYPE(descr))->castingimpls, (PyObject *)to_DType); Py_DECREF(to_DType); if (cast_impl == NULL) { diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index fd9ae9f1e41d..1d2c3edbd3b9 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1368,7 +1368,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, * Get axes tuple for operand. If not a tuple already, make it one if * there is only one axis (its content is checked later). */ - op_axes_tuple = PyList_GET_ITEM(axes, iop); + op_axes_tuple = PyList_GET_ITEM(axes, iop); // noqa: borrowed-ref - manual fix needed if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. 
*/ @@ -4944,7 +4944,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, function, arg_typenums, data); if (result == 0) { - cobj = PyDict_GetItemWithError(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK if (cobj == NULL && PyErr_Occurred()) { result = -1; } @@ -5075,7 +5075,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, */ int add_new_loop = 1; for (Py_ssize_t j = 0; j < PyList_GET_SIZE(ufunc->_loops); j++) { - PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); + PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); // noqa: borrowed-ref OK PyObject *existing_tuple = PyTuple_GET_ITEM(item, 0); int cmp = PyObject_RichCompareBool(existing_tuple, signature_tuple, Py_EQ); @@ -5117,7 +5117,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, funcdata->nargs = 0; /* Get entry for this user-defined type*/ - cobj = PyDict_GetItemWithError(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK if (cobj == NULL && PyErr_Occurred()) { goto fail; } diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 95670efb936f..8d617f3ddc6c 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1455,7 +1455,7 @@ find_userloop(PyUFuncObject *ufunc, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(ufunc->userloops, key); + obj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1742,7 +1742,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1813,7 +1813,7 @@ 
type_tuple_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index e5cf2cf8acb3..3efb02bd4a49 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -267,11 +267,11 @@ int initumath(PyObject *m) PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO)); PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN)); - s = PyDict_GetItemString(d, "divide"); + s = PyDict_GetItemString(d, "divide"); // noqa: borrowed-ref OK PyDict_SetItemString(d, "true_divide", s); - s = PyDict_GetItemString(d, "conjugate"); - s2 = PyDict_GetItemString(d, "remainder"); + s = PyDict_GetItemString(d, "conjugate"); // noqa: borrowed-ref OK + s2 = PyDict_GetItemString(d, "remainder"); // noqa: borrowed-ref OK /* Setup the array object's numerical structures with appropriate ufuncs in d*/ diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 5c2b4bdf0931..d6664d6bdfb7 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -47,7 +47,7 @@ F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) "failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -87,7 +87,7 @@ F2PyGetThreadLocalCallbackPtr(char *key) "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -365,7 +365,7 @@ fortran_getattr(PyFortranObject 
*fp, char *name) if (fp->dict != NULL) { // python 3.13 added PyDict_GetItemRef #if PY_VERSION_HEX < 0x030D0000 - PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); // noqa: borrowed-ref OK if (v == NULL && PyErr_Occurred()) { return NULL; } @@ -822,7 +822,7 @@ get_elsize(PyObject *obj) { } else if (PyUnicode_Check(obj)) { return PyUnicode_GET_LENGTH(obj); } else if (PySequence_Check(obj)) { - PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); // noqa: borrowed-ref OK if (fast != NULL) { Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); int sz, elsize = 0; diff --git a/tools/ci/check_c_api_usage.sh b/tools/ci/check_c_api_usage.sh new file mode 100644 index 000000000000..9f4203284823 --- /dev/null +++ b/tools/ci/check_c_api_usage.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +set -e + +# List of suspicious function calls: +SUSPICIOUS_FUNCS=( + "PyList_GetItem" + "PyDict_GetItem" + "PyDict_GetItemWithError" + "PyDict_GetItemString" + "PyDict_SetDefault" + "PyDict_Next" + "PyWeakref_GetObject" + "PyWeakref_GET_OBJECT" + "PyList_GET_ITEM" + "_PyDict_GetItemStringWithError" + "PySequence_Fast" +) + +# Find all C/C++ source files in the repo +ALL_FILES=$(find numpy -type f \( -name "*.c" -o -name "*.h" -o -name "*.c.src" -o -name "*.cpp" \) ! -path "*/pythoncapi-compat/*") + +# For debugging: print out file count +echo "Scanning $(echo "$ALL_FILES" | wc -l) C/C++ source files..." 
+ +# Prepare a result file +mkdir -p .tmp +OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX.txt) +echo -e "Running Suspicious C API usage report workflow...\n" > $OUTPUT + +FAIL=0 + +# Scan each changed file +for file in $ALL_FILES; do + + for func in "${SUSPICIOUS_FUNCS[@]}"; do + # -n : show line number + # -P : perl-style boundaries + # (?> "$OUTPUT" + echo " -> $line" >> "$OUTPUT" + echo "Recommendation:" >> "$OUTPUT" + echo "If this use is intentional and safe, add '// noqa: borrowed-ref OK' on the same line to silence this warning." >> "$OUTPUT" + echo "Otherwise, consider replacing $func with a thread-safe API function." >> "$OUTPUT" + echo "" >> "$OUTPUT" + FAIL=1 + done <<< "$matches" + fi + done +done + +if [[ $FAIL -eq 1 ]]; then + echo "C API borrow-ref linter found issues." +else + echo "C API borrow-ref linter found no issues." > $OUTPUT +fi + +cat "$OUTPUT" +exit "$FAIL" diff --git a/tools/linter.py b/tools/linter.py index 1ce9ca763343..4b5115c97a2b 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -18,6 +18,7 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: Unlike pycodestyle, ruff by itself is not capable of limiting its output to the given diff. 
""" + print("Running Ruff Check...") command = ["ruff", "check"] if fix: command.append("--fix") @@ -31,12 +32,35 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: return res.returncode, res.stdout def run_lint(self, fix: bool) -> None: - retcode, errors = self.run_ruff(fix) - errors and print(errors) + # Ruff Linter + retcode, ruff_errors = self.run_ruff(fix) + ruff_errors and print(ruff_errors) + + if retcode: + sys.exit(retcode) + + # C API Borrowed-ref Linter + retcode, c_API_errors = self.run_check_c_api() + c_API_errors and print(c_API_errors) sys.exit(retcode) + def run_check_c_api(self) -> tuple[int, str]: + # Running borrowed ref checker + print("Running C API borrow-reference linter...") + borrowed_ref_script = os.path.join(self.repository_root, "tools", "ci", + "check_c_api_usage.sh") + borrowed_res = subprocess.run( + ["bash", borrowed_ref_script], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + ) + + # Exit with non-zero if C API Check fails + return borrowed_res.returncode, borrowed_res.stdout + if __name__ == "__main__": parser = ArgumentParser() From d02611ad99488637b48f4be203f297ea7b29c95d Mon Sep 17 00:00:00 2001 From: hannah Date: Thu, 17 Jul 2025 05:43:20 -0400 Subject: [PATCH 213/294] Doc: use different data in stacked arrays (#29390) Improve docstring examples for dstack and column_stack --- numpy/lib/_shape_base_impl.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 89b86c80964d..c44d603611ee 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -653,11 +653,11 @@ def column_stack(tup): -------- >>> import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1, 4], + [2, 5], + [3, 6]]) """ arrays = [] @@ -713,18 +713,18 @@ def dstack(tup): -------- >>> import 
numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) + array([[[1, 4], + [2, 5], + [3, 6]]]) >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) + >>> b = np.array([[4],[5],[6]]) >>> np.dstack((a,b)) - array([[[1, 2]], - [[2, 3]], - [[3, 4]]]) + array([[[1, 4]], + [[2, 5]], + [[3, 6]]]) """ arrs = atleast_3d(*tup) From bd5cb2a293c02b8a50864cec1a751afc54f2ff47 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Sat, 19 Jul 2025 23:55:14 -0700 Subject: [PATCH 214/294] MNT: Cleanup infs handling in np.testing assertion utilities (#29321) * TST: Add failing test showing shape mismatch issue Recently, #29112 added showing first mismatches indices, but assert_array_almost_equal.compare trips it up by returning a shape different from its input, causing an IndexError: ``` <...> if invalids.ndim != 0: if flagged.ndim > 0: > positions = np.argwhere(np.asarray(~flagged))[invalids] E IndexError: boolean index did not match indexed array along axis 0; size of axis is 3 but size of corresponding boolean axis is 2 ``` (traceback shown using pytest --full-trace) * MNT: Remove assert_array_almost_equal old infs logic, but tests fail A nice cleanup (newer, similar inf handling already exists now in assert_array_compare), and resolves the shape mismatch issue. However, the removed logic was handling complex infs while the one isn't, causing the new test and an existing one (TestInterp::test_complex_interp) to now fail with RuntimeWarnings attempting to subtract the complex infs. * MNT: assert_array_compare handles all inf values assert_array_compare now tests all inf values for matching position and value, including complex infs. Fixes the failing tests. * TST: Test array_allclose behavior for inf values The behavior for real infs is the same is before. For complex infs, demonstrates that the behavior for mismatching values is now cleaner, showing a concise error message vs. 
previously displaying nan max errors. For complex infs with matching values, the behavior is the same as before, accepting them as equal (although internally they would now be filtered ahead of being passed to isclose, like real infs already had been). * TST: assert_allclose behavior with nans that are also infs * MNT: Extract robust_any_difference() helper to DRY --- numpy/testing/_private/utils.py | 84 ++++++++++++++++++------------- numpy/testing/tests/test_utils.py | 49 ++++++++++++++++++ 2 files changed, 98 insertions(+), 35 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e868239e62ab..592940f3e328 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -25,7 +25,7 @@ import numpy as np import numpy.linalg._umath_linalg -from numpy import isfinite, isinf, isnan +from numpy import isfinite, isnan from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray __all__ = [ @@ -758,17 +758,7 @@ def istime(x): def isvstring(x): return x.dtype.char == "T" - def func_assert_same_pos(x, y, func=isnan, hasval='nan'): - """Handling nan/inf. - - Combine results of running func on x and y, checking that they are True - at the same locations. - - """ - __tracebackhide__ = True # Hide traceback for py.test - - x_id = func(x) - y_id = func(y) + def robust_any_difference(x, y): # We include work-arounds here to handle three types of slightly # pathological ndarray subclasses: # (1) all() on fully masked arrays returns np.ma.masked, so we use != True @@ -781,10 +771,23 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): # not implement np.all(), so favor using the .all() method # We are not committed to supporting cases (2) and (3), but it's nice to # support them if possible. 
- result = x_id == y_id + result = x == y if not hasattr(result, "all") or not callable(result.all): result = np.bool(result) - if result.all() != True: + return result.all() != True + + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): + """Handling nan/inf. + + Combine results of running func on x and y, checking that they are True + at the same locations. + + """ + __tracebackhide__ = True # Hide traceback for py.test + + x_id = func(x) + y_id = func(y) + if robust_any_difference(x_id, y_id): msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -804,6 +807,29 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): else: return y_id + def assert_same_inf_values(x, y, infs_mask): + """ + Verify all inf values match in the two arrays + """ + __tracebackhide__ = True # Hide traceback for py.test + + if not infs_mask.any(): + return + if x.ndim > 0 and y.ndim > 0: + x = x[infs_mask] + y = y[infs_mask] + else: + assert infs_mask.all() + + if robust_any_difference(x, y): + msg = build_err_msg( + [x, y], + err_msg + '\ninf values mismatch:', + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + try: if strict: cond = x.shape == y.shape and x.dtype == y.dtype @@ -828,12 +854,15 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') if equal_inf: - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == +inf, - hasval='+inf') - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == -inf, - hasval='-inf') + # If equal_nan=True, skip comparing nans below for equality if they are + # also infs (e.g. inf+nanj) since that would always fail. 
+ isinf_func = lambda xy: np.logical_and(np.isinf(xy), np.invert(flagged)) + infs_mask = func_assert_same_pos( + x, y, + func=isinf_func, + hasval='inf') + assert_same_inf_values(x, y, infs_mask) + flagged |= infs_mask elif istime(x) and istime(y): # If one is datetime64 and the other timedelta64 there is no point @@ -1183,24 +1212,9 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', """ __tracebackhide__ = True # Hide traceback for py.test from numpy._core import number, result_type - from numpy._core.fromnumeric import any as npany from numpy._core.numerictypes import issubdtype def compare(x, y): - try: - if npany(isinf(x)) or npany(isinf(y)): - xinfid = isinf(x) - yinfid = isinf(y) - if not (xinfid == yinfid).all(): - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = result_type(y, 1.) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 243b8d420936..07f2c9da3005 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -612,6 +612,18 @@ def test_inf(self): assert_raises(AssertionError, lambda: self._assert_func(a, b)) + def test_complex_inf(self): + a = np.array([np.inf + 1.j, 2. + 1.j, 3. + 1.j]) + b = a.copy() + self._assert_func(a, b) + b[1] = 3. 
+ 1.j + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: (2+1j) (ACTUAL), (3+1j) (DESIRED)\n' + 'Max absolute difference among violations: 1.\n') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + def test_subclass(self): a = np.array([[1., 2.], [3., 4.]]) b = np.ma.masked_array([[1., 2.], [0., 4.]], @@ -1283,11 +1295,21 @@ def test_equal_nan(self): # Should not raise: assert_allclose(a, b, equal_nan=True) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_allclose(a, b, equal_nan=True) + b = np.array([complex(np.nan, -np.inf)]) + assert_allclose(a, b, equal_nan=True) + def test_not_equal_nan(self): a = np.array([np.nan]) b = np.array([np.nan]) assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + def test_equal_nan_default(self): # Make sure equal_nan default behavior remains unchanged. (All # of these functions use assert_array_compare under the hood.) 
@@ -1336,6 +1358,33 @@ def test_strict(self): with pytest.raises(AssertionError): assert_allclose(x, x.astype(np.float32), strict=True) + def test_infs(self): + a = np.array([np.inf]) + b = np.array([np.inf]) + assert_allclose(a, b) + + b = np.array([3.]) + expected_msg = 'inf location mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + b = np.array([-np.inf]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + b = np.array([complex(np.inf, 1.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + a = np.array([complex(np.inf, 1.)]) + b = np.array([complex(np.inf, 1.)]) + assert_allclose(a, b) + + b = np.array([complex(np.inf, 2.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) class TestArrayAlmostEqualNulp: From 96cf781e226900ba88592f3a20145d6af63e863a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 11:59:22 +0200 Subject: [PATCH 215/294] STY: Apply ruff/Perflint rule PERF102 PERF102 When using only the values of a dict use the `values()` method PERF102 When using only the keys of a dict use the `keys()` method --- benchmarks/benchmarks/bench_ufunc.py | 4 ++-- numpy/f2py/func2subr.py | 2 +- numpy/ma/testutils.py | 8 ++++---- numpy/testing/_private/utils.py | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 7dc321ac2980..155e7d4f7421 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -53,7 +53,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + 
for aarg in get_squares_().values(): arg = (aarg,) * 1 # no nin try: self.afdn(*arg) @@ -100,7 +100,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + for aarg in get_squares_().values(): arg = (aarg,) * self.ufn.nin try: self.ufn(*arg) diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index 0a875006ed75..09b67f7c3085 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -77,7 +77,7 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): def useiso_c_binding(rout): useisoc = False - for key, value in rout['vars'].items(): + for value in rout['vars'].values(): kind_value = value.get('kindselector', {}).get('kind') if kind_value in isoc_kindmap: return True diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index bffcc34b759c..4840b758663c 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -124,7 +124,7 @@ def assert_equal(actual, desired, err_msg=''): for k, i in desired.items(): if k not in actual: raise AssertionError(f"{k} not in {actual}") - assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + assert_equal(actual[k], i, f'key={k!r}\n{err_msg}') return # Case #2: lists ..... 
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): @@ -162,12 +162,12 @@ def fail_if_equal(actual, desired, err_msg='',): for k, i in desired.items(): if k not in actual: raise AssertionError(repr(k)) - fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + fail_if_equal(actual[k], i, f'key={k!r}\n{err_msg}') return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): fail_if_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + for k, i in enumerate(desired): + fail_if_equal(actual[k], i, f'item={k!r}\n{err_msg}') return if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): return fail_if_array_equal(actual, desired, err_msg) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 592940f3e328..e7bc76c584d6 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -367,13 +367,13 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): for k, i in desired.items(): if k not in actual: raise AssertionError(repr(k)) - assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', + assert_equal(actual[k], i, f'key={k!r}\n{err_msg}', verbose) return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): assert_equal(len(actual), len(desired), err_msg, verbose) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', + for k, i in enumerate(desired): + assert_equal(actual[k], i, f'item={k!r}\n{err_msg}', verbose) return from numpy import imag, iscomplexobj, real From e8d5153caceed246c378de72e5ebde15b1fc0996 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 12:00:41 +0200 Subject: [PATCH 216/294] STY: Apply ruff/Perflint rule PERF403 PERF403 Use `dict.update` instead of a for-loop PERF403 Use a 
dictionary comprehension instead of a for-loop --- numpy/_core/code_generators/genapi.py | 3 +-- numpy/lib/introspect.py | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index caeaf7a08532..b0036def7300 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -466,8 +466,7 @@ def _key(x): def merge_api_dicts(dicts): ret = {} for d in dicts: - for k, v in d.items(): - ret[k] = v + ret.update(dict(d.items())) return ret diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 5526a332fead..7ea621b823c8 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -80,13 +80,14 @@ def opt_func_info(func_name=None, signature=None): sig_pattern = re.compile(signature) matching_sigs = {} for k, v in matching_funcs.items(): - matching_chars = {} - for chars, targets in v.items(): + matching_chars = { + chars: targets + for chars, targets in v.items() if any( sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars - ): - matching_chars[chars] = targets + ) + } if matching_chars: matching_sigs[k] = matching_chars else: From 4eeace478850ade16d74b9fb0db96b5342f73090 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 12:03:27 +0200 Subject: [PATCH 217/294] MNT: Enforce ruff/Perflint rules (PERF) --- ruff.toml | 54 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/ruff.toml b/ruff.toml index ed6026ed0665..98b6b0c7244d 100644 --- a/ruff.toml +++ b/ruff.toml @@ -32,6 +32,7 @@ extend-select = [ "FLY", "I", "PD", + "PERF", "E", "W", "PGH", @@ -39,32 +40,33 @@ extend-select = [ "UP", ] ignore = [ - "B006", # Do not use mutable data structures for argument defaults - "B007", # Loop control variable not used within loop body - "B011", # Do not `assert False` 
(`python -O` removes these calls), raise `AssertionError()` - "B023", # Function definition does not bind loop variable - "B028", # No explicit `stacklevel` keyword argument found - "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling - "B905", #`zip()` without an explicit `strict=` parameter - "C408", # Unnecessary `dict()` call (rewrite as a literal) - "ISC002", # Implicitly concatenated string literals over multiple lines - "PIE790", # Unnecessary `pass` statement - "PD901", # Avoid using the generic variable name `df` for DataFrames - "E241", # Multiple spaces after comma - "E265", # Block comment should start with `# ` - "E266", # Too many leading `#` before block comment - "E302", # TODO: Expected 2 blank lines, found 1 - "E402", # Module level import not at top of file - "E712", # Avoid equality comparisons to `True` or `False` - "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check - "E731", # Do not assign a `lambda` expression, use a `def` - "E741", # Ambiguous variable name - "F403", # `from ... 
import *` used; unable to detect undefined names - "F405", # may be undefined, or defined from star imports - "F821", # Undefined name - "F841", # Local variable is assigned to but never used - "UP015", # Unnecessary mode argument - "UP031", # TODO: Use format specifiers instead of percent format + "B006", # Do not use mutable data structures for argument defaults + "B007", # Loop control variable not used within loop body + "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` + "B023", # Function definition does not bind loop variable + "B028", # No explicit `stacklevel` keyword argument found + "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling + "B905", #`zip()` without an explicit `strict=` parameter + "C408", # Unnecessary `dict()` call (rewrite as a literal) + "ISC002", # Implicitly concatenated string literals over multiple lines + "PIE790", # Unnecessary `pass` statement + "PD901", # Avoid using the generic variable name `df` for DataFrames + "PERF401", # PERF401 Use a list comprehension to create a transformed list + "E241", # Multiple spaces after comma + "E265", # Block comment should start with `# ` + "E266", # Too many leading `#` before block comment + "E302", # TODO: Expected 2 blank lines, found 1 + "E402", # Module level import not at top of file + "E712", # Avoid equality comparisons to `True` or `False` + "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check + "E731", # Do not assign a `lambda` expression, use a `def` + "E741", # Ambiguous variable name + "F403", # `from ... 
import *` used; unable to detect undefined names + "F405", # may be undefined, or defined from star imports + "F821", # Undefined name + "F841", # Local variable is assigned to but never used + "UP015" , # Unnecessary mode argument + "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] From adedeb093f0254c27e46da84c1139c1f702f6bab Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 23:55:18 +0200 Subject: [PATCH 218/294] STY: better solution for PERF102 Co-authored-by: Pieter Eendebak --- numpy/ma/testutils.py | 12 ++++++------ numpy/testing/_private/utils.py | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index 4840b758663c..0df3b1757fd6 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -121,10 +121,10 @@ def assert_equal(actual, desired, err_msg=''): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(f"{k} not in {actual}") - assert_equal(actual[k], i, f'key={k!r}\n{err_msg}') + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') return # Case #2: lists ..... 
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): @@ -159,15 +159,15 @@ def fail_if_equal(actual, desired, err_msg='',): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) fail_if_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) - fail_if_equal(actual[k], i, f'key={k!r}\n{err_msg}') + fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): fail_if_equal(len(actual), len(desired), err_msg) - for k, i in enumerate(desired): - fail_if_equal(actual[k], i, f'item={k!r}\n{err_msg}') + for k in range(len(desired)): + fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') return if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): return fail_if_array_equal(actual, desired, err_msg) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e7bc76c584d6..e4a74e69afb0 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -364,16 +364,16 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) - assert_equal(actual[k], i, f'key={k!r}\n{err_msg}', + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', verbose) return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in enumerate(desired): - assert_equal(actual[k], i, f'item={k!r}\n{err_msg}', + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', verbose) return from numpy import imag, iscomplexobj, real From 
c56f86d6a1c54267164cf67407db1a95b0307b20 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 29 May 2025 00:27:18 +0200 Subject: [PATCH 219/294] STY: taken into account reviewer's comments Co-authored-by: Joren Hammudoglu --- numpy/_core/code_generators/genapi.py | 2 +- numpy/lib/introspect.py | 9 ++++----- ruff.toml | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index b0036def7300..6a370a7dc3cd 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -466,7 +466,7 @@ def _key(x): def merge_api_dicts(dicts): ret = {} for d in dicts: - ret.update(dict(d.items())) + ret.update(d) return ret diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 7ea621b823c8..816c79a669b9 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -80,14 +80,13 @@ def opt_func_info(func_name=None, signature=None): sig_pattern = re.compile(signature) matching_sigs = {} for k, v in matching_funcs.items(): - matching_chars = { - chars: targets - for chars, targets in v.items() + matching_chars = {} + for chars, targets in v.items(): if any( sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars - ) - } + ): + matching_chars[chars] = targets # noqa: PERF403 if matching_chars: matching_sigs[k] = matching_chars else: diff --git a/ruff.toml b/ruff.toml index 98b6b0c7244d..4666efdf39bc 100644 --- a/ruff.toml +++ b/ruff.toml @@ -51,7 +51,7 @@ ignore = [ "ISC002", # Implicitly concatenated string literals over multiple lines "PIE790", # Unnecessary `pass` statement "PD901", # Avoid using the generic variable name `df` for DataFrames - "PERF401", # PERF401 Use a list comprehension to create a transformed list + "PERF401", # Use a list comprehension to create a transformed list "E241", # Multiple spaces after comma "E265", # Block comment 
should start with `# ` "E266", # Too many leading `#` before block comment From 5a031d9b8822551711973e6ae220cd00817c9523 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 21 Jul 2025 10:44:47 -0600 Subject: [PATCH 220/294] ENH: avoid thread safety issues around uses of `PySequence_Fast` (#29394) * BUG: prevent mutating multiter arguments while processing them * MAINT: also add locking for array creation from array-likes * MAINT: add lock nditer argument with a critical section * MAINT: give NO_BRACKETS variants a label argument * MAINT: more critical sections * MAINT: apply suggestions from Sam * MAINT: fix linter and GIL-enabled build compilation * MAINT: simplify error paths * Update numpy/_core/src/multiarray/iterators.c Co-authored-by: Sebastian Berg --------- Co-authored-by: Sebastian Berg --- numpy/_core/src/common/npy_pycompat.h | 52 ++++++++++++++++ numpy/_core/src/multiarray/ctors.c | 59 ++++++++++--------- numpy/_core/src/multiarray/iterators.c | 13 ++-- numpy/_core/src/multiarray/methods.c | 25 ++++---- numpy/_core/src/multiarray/multiarraymodule.c | 31 +++++----- numpy/_core/src/multiarray/nditer_pywrap.c | 38 +++++++++--- numpy/_core/tests/test_multithreading.py | 47 +++++++++++++++ 7 files changed, 200 insertions(+), 65 deletions(-) diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index 769b90215f2b..f751af666524 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -6,4 +6,56 @@ #define Npy_HashDouble _Py_HashDouble +#ifdef Py_GIL_DISABLED +// Specialized version of critical section locking to safely use +// PySequence_Fast APIs without the GIL. For performance, the argument *to* +// PySequence_Fast() is provided to the macro, not the *result* of +// PySequence_Fast(), which would require an extra test to determine if the +// lock must be acquired. 
+// +// These are tweaked versions of macros defined in CPython in +// pycore_critical_section.h, originally added in CPython commit baf347d91643. +// They're defined in terms of the NPY_*_CRITICAL_SECTION_NO_BRACKETS to avoid +// repetition and should behave identically to the versions in CPython. Once the +// macros are expanded, the only difference relative to those versions is the +// use of public C API symbols that are equivalent to the ones used in the +// corresponding CPython definitions. +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \ + { \ + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS( \ + original, npy_cs_fast) +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() \ + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(npy_cs_fast) \ + } + +// These macros are more flexible than the versions in the public CPython C API, +// but that comes at a cost. Here are some differences and limitations: +// +// * cs_name is a named label for the critical section. If you must nest +// critical sections, do *not* use the same name for multiple nesting +// critical sections. +// * The beginning and ending macros must happen within the same scope +// and the compiler won't necessarily enforce that. +// * The macros ending critical sections accept a named label. The label +// must match the opening critical section. 
+#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) \ + PyObject *_##cs_name##_orig_seq = (PyObject *)(original); \ + const int _##cs_name##_should_lock_cs = \ + PyList_CheckExact(_##cs_name##_orig_seq); \ + PyCriticalSection _##cs_name; \ + if (_##cs_name##_should_lock_cs) { \ + PyCriticalSection_Begin(&_##cs_name, _##cs_name##_orig_seq); \ + } +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) \ + if (_##cs_name##_should_lock_cs) { \ + PyCriticalSection_End(&_##cs_name); \ + } +#else +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } +#endif + + #endif /* NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ */ diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 4f466677c57c..498fa78118b3 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1539,7 +1539,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * This is the main code to make a NumPy array from a Python * Object. It is called from many different places. 
*/ - PyArrayObject *arr = NULL, *ret; + PyArrayObject *arr = NULL, *ret = NULL; PyArray_Descr *dtype = NULL; coercion_cache_obj *cache = NULL; int ndim = 0; @@ -1560,12 +1560,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, copy = 1; } + Py_BEGIN_CRITICAL_SECTION(op); + ndim = PyArray_DiscoverDTypeAndShape( op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); if (ndim < 0) { - return NULL; + goto cleanup; } /* If the cache is NULL, then the object is considered a scalar */ @@ -1578,16 +1580,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (min_depth != 0 && ndim < min_depth) { PyErr_SetString(PyExc_ValueError, "object of too small depth for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } if (max_depth != 0 && ndim > max_depth) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Got the correct parameters, but the cache may already hold the result */ @@ -1602,9 +1602,11 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (was_copied_by__array__ == 1) { flags = flags & ~NPY_ARRAY_ENSURECOPY; } - PyObject *res = PyArray_FromArray(arr, dtype, flags); + // PyArray_FromArray steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromArray(arr, dtype, flags); npy_unlink_coercion_cache(cache); - return res; + goto cleanup; } else if (cache == NULL && PyArray_IsScalar(op, Void) && !(((PyVoidScalarObject *)op)->flags & NPY_ARRAY_OWNDATA) && @@ -1619,13 +1621,15 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * provide a dtype (newtype is NULL). 
*/ assert(ndim == 0); - - return PyArray_NewFromDescrAndBase( + // PyArray_NewFromDescrAndBase steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, dtype, 0, NULL, NULL, ((PyVoidScalarObject *)op)->obval, ((PyVoidScalarObject *)op)->flags, NULL, op); + goto cleanup; } /* * If we got this far, we definitely have to create a copy, since we are @@ -1633,9 +1637,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, */ if (flags & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } if (cache == NULL && in_descr != NULL && @@ -1662,16 +1665,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * have a better solution at some point): * https://github.com/pandas-dev/pandas/issues/35481 */ - return PyArray_FromScalar(op, dtype); + // PyArray_FromScalar steals a reference to dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromScalar(op, dtype); + goto cleanup; } /* There was no array (or array-like) passed in directly. 
*/ if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { PyErr_SetString(PyExc_TypeError, "WRITEBACKIFCOPY used for non-array input."); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Create a new array and copy the data */ @@ -1681,8 +1686,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, flags&NPY_ARRAY_F_CONTIGUOUS, NULL); if (ret == NULL) { npy_free_coercion_cache(cache); - Py_DECREF(dtype); - return NULL; + goto cleanup; } if (ndim == PyArray_NDIM(ret)) { /* @@ -1699,12 +1703,10 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(ndim == 0); if (PyArray_Pack(dtype, PyArray_BYTES(ret), op) < 0) { - Py_DECREF(dtype); - Py_DECREF(ret); - return NULL; + Py_CLEAR(ret); + goto cleanup; } - Py_DECREF(dtype); - return (PyObject *)ret; + goto cleanup; } assert(ndim != 0); assert(op == cache->converted_obj); @@ -1717,15 +1719,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_AssignFromCache(ret, cache); + int succeed = PyArray_AssignFromCache(ret, cache); ((PyArrayObject_fields *)ret)->nd = out_ndim; ((PyArrayObject_fields *)ret)->descr = out_descr; - Py_DECREF(dtype); - if (success < 0) { - Py_DECREF(ret); - return NULL; + if (succeed < 0) { + Py_CLEAR(ret); } + +cleanup:; + + Py_XDECREF(dtype); + Py_END_CRITICAL_SECTION(); return (PyObject *)ret; } diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index d83f0d7a3ac3..f45328a2bb75 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -23,6 +23,7 @@ #include "item_selection.h" #include "lowlevel_strided_loops.h" #include "array_assign.h" +#include "npy_pycompat.h" #define NEWAXIS_INDEX -1 #define ELLIPSIS_INDEX -2 @@ -1476,6 +1477,7 @@ PyArray_MultiIterNew(int n, ...) 
return multiiter_new_impl(n, args_impl); } + static PyObject* arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *kwds) @@ -1488,18 +1490,19 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, "keyword arguments not accepted."); return NULL; } - - fast_seq = PySequence_Fast(args, ""); // needed for pypy // noqa: borrowed-ref - manual fix needed + fast_seq = PySequence_Fast(args, ""); // noqa: borrowed-ref OK if (fast_seq == NULL) { return NULL; } + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(fast_seq) n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { - Py_DECREF(fast_seq); - return multiiter_wrong_number_of_args(); + ret = multiiter_wrong_number_of_args(); + } else { + ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); } - ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); Py_DECREF(fast_seq); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() return ret; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 50f7f5f3c73b..7a9f5d83a57c 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -997,26 +997,26 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) int i; int nin, nout; PyObject *out_kwd_obj; - PyObject *fast; - PyObject **in_objs, **out_objs, *where_obj; + PyObject **out_objs, *where_obj; /* check inputs */ nin = PyTuple_Size(args); if (nin < 0) { return -1; } - fast = PySequence_Fast(args, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed - if (fast == NULL) { - return -1; - } - in_objs = PySequence_Fast_ITEMS(fast); for (i = 0; i < nin; ++i) { - if (PyUFunc_HasOverride(in_objs[i])) { - Py_DECREF(fast); +#if defined(PYPY_VERSION) || defined(Py_LIMITED_API) + PyObject *obj = PyTuple_GetItem(args, i); + if (obj == NULL) { + return -1; + } +#else + PyObject *obj = PyTuple_GET_ITEM(args, i); +#endif + if (PyUFunc_HasOverride(obj)) { return 1; } } - Py_DECREF(fast); 
if (kwds == NULL) { return 0; } @@ -1115,14 +1115,15 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); return NULL; } - types = PySequence_Fast( // noqa: borrowed-ref - manual fix needed + types = PySequence_Fast( // noqa: borrowed-ref OK types, "types argument to ndarray.__array_function__ must be iterable"); if (types == NULL) { return NULL; } - + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(types); result = array_function_method_impl(func, types, args, kwargs); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); Py_DECREF(types); return result; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index b8ade23b6d76..732919c347b0 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2760,15 +2760,18 @@ einsum_sub_op_from_str( static int einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) { - int ellipsis = 0, subindex = 0; + int ellipsis = 0, subindex = 0, ret = -1; npy_intp i, size; PyObject *item; - obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref - manual fix needed + obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK "be a list or a tuple"); if (obj == NULL) { return -1; } + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); + size = PySequence_Size(obj); for (i = 0; i < size; ++i) { @@ -2778,14 +2781,12 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) if (ellipsis) { PyErr_SetString(PyExc_ValueError, "each subscripts list may have only one ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (subindex + 3 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } subscripts[subindex++] = '.'; subscripts[subindex++] = '.'; @@ -2800,16 +2801,14 @@ 
einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) PyErr_SetString(PyExc_TypeError, "each subscript must be either an integer " "or an ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } npy_bool bad_input = 0; if (subindex + 1 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (s < 0) { @@ -2828,16 +2827,20 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) if (bad_input) { PyErr_SetString(PyExc_ValueError, "subscript is not within the valid range [0, 52)"); - Py_DECREF(obj); - return -1; + goto cleanup; } } - } + ret = subindex; + + cleanup:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); Py_DECREF(obj); - return subindex; + return ret; + } /* diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 0af2ee408b4b..86198e9cd9f4 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -20,6 +20,7 @@ #include "common.h" #include "conversion_utils.h" #include "ctors.h" +#include "npy_pycompat.h" /* Functions not part of the public NumPy C API */ npy_bool npyiter_has_writeback(NpyIter *iter); @@ -588,7 +589,7 @@ npyiter_prepare_ops(PyObject *op_in, PyObject **out_owner, PyObject ***out_objs) { /* Take ownership of op_in (either a tuple/list or single element): */ if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref - manual fix needed + PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref OK if (op_in == NULL) { Py_DECREF(op_in); return -1; @@ -719,19 +720,28 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) /* Need nop to set up workspaces */ PyObject **op_objs = NULL; - PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs. 
*/ - int nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); + PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs. */ + PyArray_Descr **op_request_dtypes = NULL; + int pre_alloc_fail = 0; + int post_alloc_fail = 0; + int nop; + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(op_in, nditer_cs); + + nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); if (nop < 0) { - goto pre_alloc_fail; + pre_alloc_fail = 1; + goto cleanup; } /* allocate workspace for Python objects (operands and dtypes) */ NPY_ALLOC_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); if (op == NULL) { - goto pre_alloc_fail; + pre_alloc_fail = 1; + goto cleanup; } memset(op, 0, sizeof(PyObject *) * 2 * nop); - PyArray_Descr **op_request_dtypes = (PyArray_Descr **)(op + nop); + op_request_dtypes = (PyArray_Descr **)(op + nop); /* And other workspaces (that do not need to clean up their content) */ NPY_ALLOC_WORKSPACE(op_flags, npy_uint32, 8, nop); @@ -743,11 +753,25 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) * (NPY_ALLOC_WORKSPACE has to be done before a goto fail currently.) 
*/ if (op_flags == NULL || op_axes_storage == NULL || op_axes == NULL) { - goto finish; + post_alloc_fail = 1; + goto cleanup; } /* op and op_flags */ if (npyiter_convert_ops(nop, op_objs, op_flags_in, op, op_flags) != 1) { + post_alloc_fail = 1; + goto cleanup; + } + +cleanup: + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(nditer_cs); + + if (pre_alloc_fail) { + goto pre_alloc_fail; + } + + if (post_alloc_fail) { goto finish; } diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 5f8a2f11ea1f..b81c989ae5c7 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -293,3 +293,50 @@ def func(index): assert expected_warning in str(ex) run_threaded(func, max_workers=10, pass_count=True, outer_iterations=5) + + +# These are all implemented using PySequence_Fast, which needs locking to be safe +def np_broadcast(arrs): + for i in range(100): + np.broadcast(arrs) + +def create_array(arrs): + for i in range(100): + np.array(arrs) + +def create_nditer(arrs): + for i in range(1000): + np.nditer(arrs) + +@pytest.mark.parametrize("kernel", (np_broadcast, create_array, create_nditer)) +def test_arg_locking(kernel): + # should complete without failing or generating an error about an array size + # changing + + b = threading.Barrier(5) + done = 0 + arrs = [] + + def read_arrs(): + nonlocal done + b.wait() + try: + kernel(arrs) + finally: + done += 1 + + def mutate_list(): + b.wait() + while done < 4: + if len(arrs) > 10: + arrs.pop(0) + elif len(arrs) <= 10: + arrs.extend([np.array([1, 2, 3]) for _ in range(1000)]) + + arrs = [np.array([1, 2, 3]) for _ in range(1000)] + + tasks = [threading.Thread(target=read_arrs) for _ in range(4)] + tasks.append(threading.Thread(target=mutate_list)) + + [t.start() for t in tasks] + [t.join() for t in tasks] From ec3ac3d31e244384856c3f1c6cc65a37ee3906f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli 
<33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 21 Jul 2025 19:01:42 +0100 Subject: [PATCH 221/294] TYP: Type ``MaskedArray.resize``, wrap ``NoReturn`` tests in functions (#29401) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 13 ++++- .../tests/data/reveal/lib_polynomial.pyi | 5 +- numpy/typing/tests/data/reveal/ma.pyi | 5 +- numpy/typing/tests/data/reveal/ufuncs.pyi | 57 ++++++++++++------- 4 files changed, 57 insertions(+), 23 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ee3b90f2891d..a3d05556f7ce 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3,7 +3,16 @@ from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload +from typing import ( + Any, + Literal, + Never, + NoReturn, + Self, + SupportsIndex, + TypeAlias, + overload, +) from typing_extensions import TypeIs, TypeVar import numpy as np @@ -1073,7 +1082,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... def reshape(self, *s, **kwargs): ... - def resize(self, newshape, refcheck=..., order=...): ... + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... 
diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi index 8b0a9f3d22e7..b7cbadefc610 100644 --- a/numpy/typing/tests/data/reveal/lib_polynomial.pyi +++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -118,7 +118,10 @@ assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) -assert_type(np.polysub(AR_b, AR_b), NoReturn) + +def test_invalid_polysub() -> None: + assert_type(np.polysub(AR_b, AR_b), NoReturn) + assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index b5a292330208..1d44c3f8ba3c 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,4 +1,4 @@ -from typing import Any, Literal, TypeAlias, TypeVar, assert_type +from typing import Any, Literal, TypeAlias, TypeVar, assert_type, NoReturn import numpy as np from numpy import dtype, generic @@ -405,6 +405,9 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +def invalid_resize() -> None: + assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 93a8bfb15d06..0bfe4df9ad8d 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -98,26 +98,45 @@ assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) 
-assert_type(np.absolute.outer(), NoReturn) -assert_type(np.frexp.outer(), NoReturn) -assert_type(np.divmod.outer(), NoReturn) -assert_type(np.matmul.outer(), NoReturn) +def test_absolute_outer_invalid() -> None: + assert_type(np.absolute.outer(), NoReturn) +def test_frexp_outer_invalid() -> None: + assert_type(np.frexp.outer(), NoReturn) +def test_divmod_outer_invalid() -> None: + assert_type(np.divmod.outer(), NoReturn) +def test_matmul_outer_invalid() -> None: + assert_type(np.matmul.outer(), NoReturn) -assert_type(np.absolute.reduceat(), NoReturn) -assert_type(np.frexp.reduceat(), NoReturn) -assert_type(np.divmod.reduceat(), NoReturn) -assert_type(np.matmul.reduceat(), NoReturn) +def test_absolute_reduceat_invalid() -> None: + assert_type(np.absolute.reduceat(), NoReturn) +def test_frexp_reduceat_invalid() -> None: + assert_type(np.frexp.reduceat(), NoReturn) +def test_divmod_reduceat_invalid() -> None: + assert_type(np.divmod.reduceat(), NoReturn) +def test_matmul_reduceat_invalid() -> None: + assert_type(np.matmul.reduceat(), NoReturn) -assert_type(np.absolute.reduce(), NoReturn) -assert_type(np.frexp.reduce(), NoReturn) -assert_type(np.divmod.reduce(), NoReturn) -assert_type(np.matmul.reduce(), NoReturn) +def test_absolute_reduce_invalid() -> None: + assert_type(np.absolute.reduce(), NoReturn) +def test_frexp_reduce_invalid() -> None: + assert_type(np.frexp.reduce(), NoReturn) +def test_divmod_reduce_invalid() -> None: + assert_type(np.divmod.reduce(), NoReturn) +def test_matmul_reduce_invalid() -> None: + assert_type(np.matmul.reduce(), NoReturn) -assert_type(np.absolute.accumulate(), NoReturn) -assert_type(np.frexp.accumulate(), NoReturn) -assert_type(np.divmod.accumulate(), NoReturn) -assert_type(np.matmul.accumulate(), NoReturn) +def test_absolute_accumulate_invalid() -> None: + assert_type(np.absolute.accumulate(), NoReturn) +def test_frexp_accumulate_invalid() -> None: + assert_type(np.frexp.accumulate(), NoReturn) +def test_divmod_accumulate_invalid() 
-> None: + assert_type(np.divmod.accumulate(), NoReturn) +def test_matmul_accumulate_invalid() -> None: + assert_type(np.matmul.accumulate(), NoReturn) -assert_type(np.frexp.at(), NoReturn) -assert_type(np.divmod.at(), NoReturn) -assert_type(np.matmul.at(), NoReturn) +def test_frexp_at_invalid() -> None: + assert_type(np.frexp.at(), NoReturn) +def test_divmod_at_invalid() -> None: + assert_type(np.divmod.at(), NoReturn) +def test_matmul_at_invalid() -> None: + assert_type(np.matmul.at(), NoReturn) From c7a93c19765317464e3e1e301468a5c2ccd35737 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 19:13:28 +0000 Subject: [PATCH 222/294] MAINT: Bump github/codeql-action from 3.29.2 to 3.29.3 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.2 to 3.29.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/181d5eefc20863364f96762470ba6f862bdef56b...d6bbdef45e766d081b84a2def353b0055f728d3e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbee65f0b713..71abe29f9d09 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index aa3bf1aec7cc..de3d582f4750 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v2.1.27 + uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v2.1.27 with: sarif_file: results.sarif From fcb82dfe9137890326ff18920442c75bd6dc488b Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 22 Jul 2025 08:00:54 +1000 Subject: [PATCH 223/294] use a stable pypy release in CI --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 3452724841c3..7ac5ea673d61 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -82,7 +82,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: 'pypy3.11-nightly' + python-version: 'pypy3.11-v7.3.20' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt From d2c0c106eab6bc79193964b2eab40298c7295094 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Tue, 22 Jul 2025 07:51:15 +0900 Subject: [PATCH 224/294] ENH: Show unit information in repr for datetime64("NaT") (#29396) --- .../upcoming_changes/29396.improvement.rst | 5 +++++ numpy/_core/src/multiarray/scalartypes.c.src | 21 ++++++++++++++----- numpy/_core/tests/test_datetime.py | 4 +++- 3 files changed, 24 insertions(+), 6 deletions(-) create mode 100644 doc/release/upcoming_changes/29396.improvement.rst diff --git a/doc/release/upcoming_changes/29396.improvement.rst b/doc/release/upcoming_changes/29396.improvement.rst new file mode 100644 index 000000000000..2cd3d81ad9d8 --- /dev/null +++ b/doc/release/upcoming_changes/29396.improvement.rst @@ -0,0 +1,5 @@ +Show unit information in ``__repr__`` for ``datetime64("NaT")`` +------------------------------------------------------------------ +When a `datetime64` object is "Not a 
Time" (NaT), its ``__repr__`` method now +includes the time unit of the datetime64 type. This makes it consistent with +the behavior of a `timedelta64` object. diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 03165b10337e..5724afda7e47 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -939,12 +939,23 @@ datetimetype_repr(PyObject *self) if (legacy_print_mode == -1) { return NULL; } - if (legacy_print_mode > 125) { - ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); - } - else { - ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + + PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); + if((scal->obval == NPY_DATETIME_NAT) && (meta != NULL)){ + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); + } else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s','%S')", iso, meta); + } + } else { + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); + } + else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + } } + } else { PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index a10ca15bc373..888bb7db293b 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -264,10 +264,12 @@ def test_datetime_scalar_construction(self): # Some basic strings and repr assert_equal(str(np.datetime64('NaT')), 'NaT') assert_equal(repr(np.datetime64('NaT')), - "np.datetime64('NaT')") + "np.datetime64('NaT','generic')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "np.datetime64('2011-02')") + assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))), + "np.datetime64('NaT','ns')") # None gets constructed as NaT assert_equal(np.datetime64(None), 
np.datetime64('NaT')) From f13cf6e5748c8282a161a990cc7abd0859faaf75 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 22 Jul 2025 02:17:53 -0600 Subject: [PATCH 225/294] MAINT/BUG: Followups for PySequence_Fast locking (#29405) * MAINT: break up NPY_ALLOC_WORKSPACE into sub-macros that define and then init * MAINT: remove now-unneeded NO_BRACKETS critical section variants * BUG: always pass the argument to PySequence_Fast to BEGIN_CRITICAL_SECTION_SEQUENCE_FAST --- numpy/_core/src/common/npy_pycompat.h | 43 ++++++------------- numpy/_core/src/multiarray/alloc.h | 9 +++- numpy/_core/src/multiarray/iterators.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 13 +++--- numpy/_core/src/multiarray/nditer_pywrap.c | 18 +++++--- 5 files changed, 37 insertions(+), 48 deletions(-) diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index f751af666524..605833a511b7 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -15,45 +15,26 @@ // // These are tweaked versions of macros defined in CPython in // pycore_critical_section.h, originally added in CPython commit baf347d91643. -// They're defined in terms of the NPY_*_CRITICAL_SECTION_NO_BRACKETS to avoid -// repition and should behave identically to the versions in CPython. Once the -// macros are expanded, The only difference relative to those versions is the +// They should behave identically to the versions in CPython. Once the +// macros are expanded, the only difference relative to those versions is the // use of public C API symbols that are equivalent to the ones used in the // corresponding CPython definitions. 
#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \ { \ - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS( \ - original, npy_cs_fast) + PyObject *_orig_seq = (PyObject *)(original); \ + const int _should_lock_cs = \ + PyList_CheckExact(_orig_seq); \ + PyCriticalSection _cs_fast; \ + if (_should_lock_cs) { \ + PyCriticalSection_Begin(&_cs_fast, _orig_seq); \ + } #define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() \ - NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(npy_cs_fast) \ - } - -// These macros are more flexible than the versions in the public CPython C API, -// but that comes at a cost. Here are some differences and limitations: -// -// * cs_name is a named label for the critical section. If you must nest -// critical sections, do *not* use the same name for multiple nesting -// critical sections. -// * The beginning and ending macros must happen within the same scope -// and the compiler won't necessarily enforce that. -// * The macros ending critical sections accept a named label. The label -// must match the opening critical section. 
-#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) \ - PyObject *_##cs_name##_orig_seq = (PyObject *)(original); \ - const int _##cs_name##_should_lock_cs = \ - PyList_CheckExact(_##cs_name##_orig_seq); \ - PyCriticalSection _##cs_name; \ - if (_##cs_name##_should_lock_cs) { \ - PyCriticalSection_Begin(&_##cs_name, _##cs_name##_orig_seq); \ - } -#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) \ - if (_##cs_name##_should_lock_cs) { \ - PyCriticalSection_End(&_##cs_name); \ + if (_should_lock_cs) { \ + PyCriticalSection_End(&_cs_fast); \ + } \ } #else -#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) #define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { -#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) #define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } #endif diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index 8cd763f971ed..bef6407a28a3 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -93,11 +93,16 @@ _npy_init_workspace( * With some caches, it may be possible to malloc/calloc very quickly in which * case we should not hesitate to replace this pattern. 
*/ -#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ +#define NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ TYPE NAME##_static[fixed_size]; \ - TYPE *NAME; \ + TYPE *NAME; +#define NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) \ _npy_init_workspace((void **)&NAME, NAME##_static, (fixed_size), sizeof(TYPE), (size)) +#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ + NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ + NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) + static inline void _npy_free_workspace(void *buf, void *static_buf) diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index f45328a2bb75..7557662ad56c 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -1494,7 +1494,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, if (fast_seq == NULL) { return NULL; } - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(fast_seq) + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args) n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { ret = multiiter_wrong_number_of_args(); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 732919c347b0..3d82e6c7f448 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2762,20 +2762,20 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) { int ellipsis = 0, subindex = 0, ret = -1; npy_intp i, size; - PyObject *item; + PyObject *item, *seq; - obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK + seq = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK "be a list or a tuple"); - if (obj == NULL) { + if (seq == NULL) { return -1; } NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); - size = PySequence_Size(obj); + size = PySequence_Size(seq); for (i = 0; i < size; ++i) { - item = 
PySequence_Fast_GET_ITEM(obj, i); + item = PySequence_Fast_GET_ITEM(seq, i); /* Ellipsis */ if (item == Py_Ellipsis) { if (ellipsis) { @@ -2837,10 +2837,9 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) cleanup:; NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); - Py_DECREF(obj); + Py_DECREF(seq); return ret; - } /* diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 86198e9cd9f4..b68f7ad9708d 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -725,8 +725,12 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) int pre_alloc_fail = 0; int post_alloc_fail = 0; int nop; + NPY_DEFINE_WORKSPACE(op, PyArrayObject *, 2 * 8); + NPY_DEFINE_WORKSPACE(op_flags, npy_uint32, 8); + NPY_DEFINE_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS); + NPY_DEFINE_WORKSPACE(op_axes, int *, 8); - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(op_in, nditer_cs); + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(op_in); nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); if (nop < 0) { @@ -735,7 +739,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) } /* allocate workspace for Python objects (operands and dtypes) */ - NPY_ALLOC_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); + NPY_INIT_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); if (op == NULL) { pre_alloc_fail = 1; goto cleanup; @@ -744,9 +748,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) op_request_dtypes = (PyArray_Descr **)(op + nop); /* And other workspaces (that do not need to clean up their content) */ - NPY_ALLOC_WORKSPACE(op_flags, npy_uint32, 8, nop); - NPY_ALLOC_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); - NPY_ALLOC_WORKSPACE(op_axes, int *, 8, nop); + NPY_INIT_WORKSPACE(op_flags, npy_uint32, 8, nop); + NPY_INIT_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); + 
NPY_INIT_WORKSPACE(op_axes, int *, 8, nop); /* * Trying to allocate should be OK if one failed, check for error now * that we can use `goto finish` to clean up everything. @@ -763,9 +767,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) goto cleanup; } -cleanup: +cleanup:; - NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(nditer_cs); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); if (pre_alloc_fail) { goto pre_alloc_fail; From 8f68377c0e90f8ff9a996e801ad0ba2c5971ec3b Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Tue, 22 Jul 2025 06:31:01 -0400 Subject: [PATCH 226/294] BUG: Any dtype should call `square` on `arr ** 2` (#29392) * BUG: update fast_scalar_power to handle special-case squaring for any array type except object arrays * BUG: fix missing declaration * TST: add test to ensure `arr**2` calls square for structured dtypes * STY: remove whitespace * BUG: replace new variable `is_square` with direct op comparison in `fast_scalar_power` function --- numpy/_core/src/multiarray/number.c | 8 +++++++- numpy/_core/tests/test_multiarray.py | 7 +++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index b801d7e041e2..de4012641684 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -332,6 +332,7 @@ static int fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) { PyObject *fastop = NULL; + if (PyLong_CheckExact(o2)) { int overflow = 0; long exp = PyLong_AsLongAndOverflow(o2, &overflow); @@ -363,7 +364,12 @@ fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) } PyArrayObject *a1 = (PyArrayObject *)o1; - if (!(PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1))) { + if (PyArray_ISOBJECT(a1)) { + return 1; + } + if (fastop != n_ops.square && !PyArray_ISFLOAT(a1) && !PyArray_ISCOMPLEX(a1)) { + // we special-case squaring for any array type + // gh-29388 return 1; } 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 04222025883e..e7647d2e23fe 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4215,6 +4215,13 @@ def pow_for(exp, arr): assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + def test_pos_array_ufunc_override(self): class A(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): From 1016f8a9abf715b5cd84440e2d27c36faf442046 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Tue, 22 Jul 2025 12:53:49 -0700 Subject: [PATCH 227/294] DOC: Fix index name in notes for np.take --- numpy/_core/fromnumeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 9d01fca8aa32..1eeae1f33a01 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -170,7 +170,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): Ni, Nk = a.shape[:axis], a.shape[axis+1:] for ii in ndindex(Ni): - for kk in ndindex(Nj): + for kk in ndindex(Nk): out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] For this reason, it is equivalent to (but faster than) the following use From 436662ee6cd73e959d5eff375f3e8eec5e0be8a2 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 13:57:43 +0100 Subject: [PATCH 228/294] TYP: Type ``MaskedArray.__deepcopy__`` and ``MaskedArray.argsort`` (#29418) --- numpy/__init__.pyi | 2 +- numpy/ma/core.py | 2 +- numpy/ma/core.pyi | 13 +++++++++++-- numpy/typing/tests/data/fail/ma.pyi | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 6 ++++++ 
numpy/typing/tests/data/reveal/ndarray_misc.pyi | 4 ++-- 6 files changed, 23 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d8c57c87cbbe..a196c000b6bc 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1721,7 +1721,7 @@ class _ArrayOrScalarCommon: order: str | Sequence[str] | None = ..., *, stable: bool | None = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e7d8a20f6c6c..84e029439725 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5628,7 +5628,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, is used. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. - order : list, optional + order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a3d05556f7ce..a4bfeb3ffab7 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1398,7 +1398,16 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... - def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... + def argsort( + self, + axis: SupportsIndex | _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool = False, + ) -> _MaskedArray[intp]: ... # Keep in-sync with np.ma.argmin @overload # type: ignore[override] @@ -1703,7 +1712,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # def __reduce__(self): ... 
- def __deepcopy__(self, memo=...): ... + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 5dc6706ebf81..160d30f885e3 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -141,3 +141,5 @@ MAR_c //= 2 # type: ignore[misc] MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] + +MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 1d44c3f8ba3c..9aae83eda366 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -405,6 +405,12 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) + +assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) + def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 7f0a214f8f52..b219f4e5bec2 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -70,8 +70,8 @@ assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) -assert_type(f8.argsort(), npt.NDArray[Any]) -assert_type(AR_f8.argsort(), npt.NDArray[Any]) +assert_type(f8.argsort(), npt.NDArray[np.intp]) 
+assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) assert_type(AR_f8.choose([0]), npt.NDArray[Any]) From e766381f73c29c7ef4c0d7f8d58012c78914c15c Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 16:31:36 +0100 Subject: [PATCH 229/294] TYP: Type ``MaskedArray.view`` (#29383) --- numpy/__init__.pyi | 4 +-- numpy/ma/core.pyi | 47 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 12 +++++++ 3 files changed, 60 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a196c000b6bc..934fe7ff0287 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2575,14 +2575,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # (dtype: T) def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... @overload # (dtype: dtype[T]) - def view(self, /, dtype: _DTypeLike[_ScalarT]) -> NDArray[_ScalarT]: ... + def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... @overload # (type: T) def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... @overload # (_: T) def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... @overload # (dtype: ?) def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... - @overload # (dtype: ?, type: type[T]) + @overload # (dtype: ?, type: T) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... 
diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a4bfeb3ffab7..205dc83fad67 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -17,6 +17,7 @@ from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( + _HasDType, _HasDTypeWithRealAndImag, _ModeKind, _OrderKACF, @@ -76,12 +77,14 @@ from numpy._typing import ( _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _DTypeLike, _DTypeLikeBool, _IntLike_co, _ScalarLike_co, _Shape, _ShapeLike, ) +from numpy._typing._dtype_like import _VoidDTypeLike __all__ = [ "MAError", @@ -453,7 +456,49 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... def __array_finalize__(self, obj): ... def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def view(self, dtype=..., type=..., fill_value=...): ... + + @overload # () + def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... + @overload # (dtype: DTypeT) + def view( + self, + /, + dtype: _DTypeT | _HasDType[_DTypeT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, _DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view( + self, + /, + dtype: _DTypeLike[_ScalarT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view( + self, + /, + dtype: DTypeLike | None = None, + *, + type: type[_ArrayT], + fill_value: _ScalarLike_co | None = None + ) -> _ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view(self, /, dtype: DTypeLike | None, type: type[_ArrayT], fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... 
+ @overload # (dtype: ArrayT, /) + def view(self, /, dtype: type[_ArrayT], type: None = None, fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + @overload # (dtype: ?) + def view( + self, + /, + # `_VoidDTypeLike | str | None` is like `DTypeLike` but without `_DTypeLike[Any]` to avoid + # overlaps with previous overloads. + dtype: _VoidDTypeLike | str | None, + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype]: ... + def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... @property diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 9aae83eda366..6cad138b9dc5 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -405,6 +405,18 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.view(), MaskedArray[np.float64]) +assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.dtype(np.float32)), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32]) +assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype='float32'), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype='float32', type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) +assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) + assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) From 7845ef8a42eb8a87830b704b434f4939be0e7591 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Jul 2025 17:18:56 +0000 Subject: [PATCH 230/294] MAINT: Bump github/codeql-action from 3.29.3 to 3.29.4 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.3 to 3.29.4. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/d6bbdef45e766d081b84a2def353b0055f728d3e...4e828ff8d448a8a6e532957b1811f387a63867e8) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 71abe29f9d09..1aea33f531f4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/autobuild@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index de3d582f4750..84fbe1f03cb1 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v2.1.27 + uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v2.1.27 with: sarif_file: results.sarif From a0bb9edfb91c77b241294a5dba9e26158fb3ec1f Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 23 Jul 2025 19:22:38 +0200 Subject: [PATCH 231/294] DOC: Remove outdated `numpy.exceptions` compatibility note. The exceptions were removed from the global `numpy` namespace in 2.0 (#24316). --- numpy/exceptions.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/exceptions.py b/numpy/exceptions.py index 0e8688ae9eba..cf70b4a4ce3b 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -7,8 +7,7 @@ .. versionadded:: NumPy 1.25 - The exceptions module is new in NumPy 1.25. Older exceptions remain - available through the main NumPy namespace for compatibility. + The exceptions module is new in NumPy 1.25. .. 
currentmodule:: numpy.exceptions From 8b2321d03c1abce671a15f9d08a97249bd2d161d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 18:31:05 +0100 Subject: [PATCH 232/294] TYP: Type ``MaskedArray.reshape`` (#29404) Co-authored-by: Joren Hammudoglu --- numpy/__init__.pyi | 1 + numpy/ma/core.pyi | 87 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 8 +++ 3 files changed, 95 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 934fe7ff0287..302f0cdf15fc 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2468,6 +2468,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + # Keep in sync with `MaskedArray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 205dc83fad67..0a039831a7d4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -17,9 +17,11 @@ from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( + _AnyShapeT, _HasDType, _HasDTypeWithRealAndImag, _ModeKind, + _OrderACF, _OrderKACF, _PartitionKind, _SortKind, @@ -1126,7 +1128,90 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... - def reshape(self, *s, **kwargs): ... 
+ + # Keep in sync with `ndarray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( # type: ignore[overload-overlap] # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape( + self, + shape: _AnyShapeT, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... 
+ @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 6cad138b9dc5..f2b7aa215c7a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -27,6 +27,7 @@ AR_LIKE_dt64: list[np.datetime64] AR_LIKE_o: list[np.object_] AR_number: NDArray[np.number] +MAR_c8: MaskedArray[np.complex64] MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] @@ -399,6 +400,13 @@ assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclass) assert_type(MAR_f8.round(), MaskedArray[np.float64]) assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) +assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) +assert_type(MAR_c8.reshape(2, 3, 4, 5), np.ma.MaskedArray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(MAR_td64.reshape(()), np.ma.MaskedArray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) +assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) + assert_type(MAR_f8.cumprod(), MaskedArray[Any]) assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) From eecfbc544dcaafa49003a8a2cd113ea46d763be4 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 24 Jul 2025 16:22:40 +0800 Subject: [PATCH 233/294] ENH: Enable RVV acceleration for auto-vectorization in RISC-V 
Signed-off-by: Wang Yang --- numpy/_core/meson.build | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 1b79aa39781c..ba482d0cf5a0 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1068,6 +1068,7 @@ foreach gen_mtargets : [ VSX2, VX, LSX, + RVV, ] ], [ From 66ffb4c45f366b57b43457d531eed6e256f32086 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 24 Jul 2025 17:02:52 +0800 Subject: [PATCH 234/294] ci: rerun workflow due to mirror sync issue --- numpy/_core/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index ba482d0cf5a0..b4c769810ad8 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1068,7 +1068,7 @@ foreach gen_mtargets : [ VSX2, VX, LSX, - RVV, + RVV ] ], [ From 3702c9ab93c2b732b200dd19ac0670c68fb0702c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Jul 2025 13:17:03 +0000 Subject: [PATCH 235/294] MAINT: Bump larsoner/circleci-artifacts-redirector-action Bumps [larsoner/circleci-artifacts-redirector-action](https://github.com/larsoner/circleci-artifacts-redirector-action) from 1.1.0 to 1.2.0. - [Release notes](https://github.com/larsoner/circleci-artifacts-redirector-action/releases) - [Commits](https://github.com/larsoner/circleci-artifacts-redirector-action/compare/7eafdb60666f57706a5525a2f5eb76224dc8779b...839631420e45a08af893032e5a5e8843bf47e8ff) --- updated-dependencies: - dependency-name: larsoner/circleci-artifacts-redirector-action dependency-version: 1.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/circleci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml index 3c84ce3c6890..12c51735bf81 100644 --- a/.github/workflows/circleci.yml +++ b/.github/workflows/circleci.yml @@ -17,7 +17,7 @@ jobs: statuses: write steps: - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@7eafdb60666f57706a5525a2f5eb76224dc8779b # master + uses: larsoner/circleci-artifacts-redirector-action@839631420e45a08af893032e5a5e8843bf47e8ff # master with: repo-token: ${{ secrets.GITHUB_TOKEN }} api-token: ${{ secrets.CIRCLE_TOKEN }} From f093acd2ef1804787b9976bd6d92bf75e8169e70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Jul 2025 13:17:08 +0000 Subject: [PATCH 236/294] MAINT: Bump pypa/cibuildwheel from 3.0.1 to 3.1.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.1 to 3.1.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/95d2f3a92fbf80abe066b09418bbf128a8923df2...ffd835cef18fa11522f608fc0fa973b89f5ddc87) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index ce0c2e803143..dbfb0ad48960 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 + - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index fd2047283a1f..ca41042e1ad3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 + uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 30310834a888aa10863dc57de44b547757dd4466 Mon Sep 17 00:00:00 2001 From: "Aleksandr A. Voyt" Date: Thu, 24 Jul 2025 15:18:57 +0300 Subject: [PATCH 237/294] BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR symlink (#29434) Ensure `TestNumpyConfig.test_configtool_pkgconfigdir` resolves `PKG_CONFIG_DIR` using `.resolve()` to match the resolved path from `_configtool.py`. 
--- numpy/tests/test_configtool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index a9c23b5cc007..c4e9a9551c0c 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -35,7 +35,7 @@ def test_configtool_cflags(self): def test_configtool_pkgconfigdir(self): stdout = self.check_numpyconfig('--pkgconfigdir') - assert pathlib.Path(stdout) == PKG_CONFIG_DIR + assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve() @pytest.mark.skipif(not IS_INSTALLED, From 439f7632ab2a828d70a30d16d3104f8c9f5c1f13 Mon Sep 17 00:00:00 2001 From: kostayScr <11485271+kostayScr@users.noreply.github.com> Date: Thu, 24 Jul 2025 17:59:28 +0300 Subject: [PATCH 238/294] BUG: fix datetime/timedelta hash memory leak (#29411) * BUG: fix datetime/timedelta hash memory leak * get metadata directly from scalar object Co-authored-by: Nathan Goldbaum * BUG: remove unnecessary Py_DECREF Co-authored-by: Nathan Goldbaum * BUG: remove dtype variable declaration that is not used anymore --------- Co-authored-by: Nathan Goldbaum --- numpy/_core/src/multiarray/scalartypes.c.src | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 5724afda7e47..5e3a3ba71d3e 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -3911,7 +3911,6 @@ static npy_hash_t @lname@_arrtype_hash(PyObject *obj) { PyArray_DatetimeMetaData *meta; - PyArray_Descr *dtype; npy_@lname@ val = PyArrayScalar_VAL(obj, @name@); if (val == NPY_DATETIME_NAT) { @@ -3919,10 +3918,10 @@ static npy_hash_t return PyBaseObject_Type.tp_hash(obj); } - dtype = PyArray_DescrFromScalar(obj); - meta = get_datetime_metadata_from_dtype(dtype); + meta = &((PyDatetimeScalarObject *)obj)->obmeta; - return @lname@_hash(meta, val); + npy_hash_t res = @lname@_hash(meta, val); + return res; 
} /**end repeat**/ From a4906a744754260e2ad166c14ca1b9ff8926cb6c Mon Sep 17 00:00:00 2001 From: Zebreus Date: Thu, 24 Jul 2025 17:32:17 +0200 Subject: [PATCH 239/294] BLD: allow targeting webassembly without emscripten --- numpy/_core/src/multiarray/lowlevel_strided_loops.c.src | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 0c4eb3dd9a8d..0a69a08e678e 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -850,7 +850,7 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * // Enable auto-vectorization for floating point casts with clang #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ - #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) #if __clang_major__ >= 12 _Pragma("clang fp exceptions(ignore)") #endif @@ -965,7 +965,7 @@ static GCC_CAST_OPT_LEVEL int #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ - #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) #if __clang_major__ >= 12 _Pragma("clang fp exceptions(strict)") #endif From 9812a3f701abc1030f93037a1165b2c0a9c8d101 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 24 Jul 2025 22:30:57 +0100 Subject: [PATCH 240/294] DOC: Correct more ndarray defaults (#29402) --- numpy/_core/_add_newdocs.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 90d33d4b810a..ed8cf50ee360 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -3080,7 +3080,7 @@ 
add_newdoc('numpy._core.multiarray', 'ndarray', ('all', """ - a.all(axis=None, out=None, keepdims=False, *, where=True) + a.all(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns True if all elements evaluate to True. @@ -3095,7 +3095,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('any', """ - a.any(axis=None, out=None, keepdims=False, *, where=True) + a.any(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns True if any of the elements of `a` evaluate to True. @@ -3303,7 +3303,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', """ - a.clip(min=None, max=None, out=None, **kwargs) + a.clip(min=np._NoValue, max=np._NoValue, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. @@ -3708,7 +3708,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('max', """ - a.max(axis=None, out=None, keepdims=False, initial=, where=True) + a.max(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the maximum along a given axis. @@ -3723,7 +3723,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('mean', """ - a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True) + a.mean(axis=None, dtype=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns the average of the array elements along given axis. @@ -3738,7 +3738,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('min', """ - a.min(axis=None, out=None, keepdims=False, initial=, where=True) + a.min(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the minimum along a given axis. 
@@ -3768,8 +3768,8 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('prod', """ - a.prod(axis=None, dtype=None, out=None, keepdims=False, - initial=1, where=True) + a.prod(axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue) Return the product of the array elements over the given axis @@ -4240,7 +4240,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('std', """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) Returns the standard deviation of the array elements along given axis. @@ -4255,7 +4255,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('sum', """ - a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True) + a.sum(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the sum of the array elements over the given axis. @@ -4518,7 +4518,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('var', """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) Returns the variance of the array elements, along given axis. From ec816336bcc042794418693508e678164afd3b25 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 24 Jul 2025 16:34:43 -0600 Subject: [PATCH 241/294] MAINT: Update main after 2.3.2 release. 
- Forward port 2.3.2-changelog.rst, - Forward port 2.3.2-notes.rst, - Update release.rst [skip azp] [skip cirrus] [skip actions] --- doc/changelog/2.3.2-changelog.rst | 38 ++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.2-notes.rst | 56 ++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+) create mode 100644 doc/changelog/2.3.2-changelog.rst create mode 100644 doc/source/release/2.3.2-notes.rst diff --git a/doc/changelog/2.3.2-changelog.rst b/doc/changelog/2.3.2-changelog.rst new file mode 100644 index 000000000000..5c893a510ae7 --- /dev/null +++ b/doc/changelog/2.3.2-changelog.rst @@ -0,0 +1,38 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Joren Hammudoglu +* Maanas Arora +* Marco Edward Gorelli +* Matti Picus +* Nathan Goldbaum +* Sebastian Berg +* kostayScr + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29256 `__: MAINT: Prepare 2.3.x for further development +* `#29283 `__: TYP: Work around a mypy issue with bool arrays (#29248) +* `#29284 `__: BUG: fix fencepost error in StringDType internals +* `#29287 `__: BUG: handle case in mapiter where descriptors might get replaced... +* `#29350 `__: BUG: Fix shape error path in array-interface +* `#29412 `__: BUG: Allow reading non-npy files in npz and add test +* `#29413 `__: TST: Avoid uninitialized values in test (#29341) +* `#29414 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29415 `__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) +* `#29416 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... 
+* `#29417 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392) +* `#29424 `__: MAINT: use a stable pypy release in CI +* `#29425 `__: MAINT: Support python 314rc1 +* `#29429 `__: MAINT: Update highway to match main. +* `#29430 `__: BLD: use github to build macos-arm64 wheels with OpenBLAS and... +* `#29437 `__: BUG: fix datetime/timedelta hash memory leak (#29411) diff --git a/doc/source/release.rst b/doc/source/release.rst index 59e6dd07b002..3af644a57562 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.2 2.3.1 2.3.0 2.2.6 diff --git a/doc/source/release/2.3.2-notes.rst b/doc/source/release/2.3.2-notes.rst new file mode 100644 index 000000000000..2acc400c89fe --- /dev/null +++ b/doc/source/release/2.3.2-notes.rst @@ -0,0 +1,56 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.2 Release Notes +========================= + +The NumPy 2.3.2 release is a patch release with a number of bug fixes and +maintenance updates. The highlights are: + +- Wheels for Python 3.14.0rc1 +- PyPy updated to the latest stable release +- OpenBLAS updated to 0.3.30 + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Joren Hammudoglu +* Maanas Arora +* Marco Edward Gorelli +* Matti Picus +* Nathan Goldbaum +* Sebastian Berg +* kostayScr + + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29256 `__: MAINT: Prepare 2.3.x for further development +* `#29283 `__: TYP: Work around a mypy issue with bool arrays (#29248) +* `#29284 `__: BUG: fix fencepost error in StringDType internals +* `#29287 `__: BUG: handle case in mapiter where descriptors might get replaced... 
+* `#29350 `__: BUG: Fix shape error path in array-interface +* `#29412 `__: BUG: Allow reading non-npy files in npz and add test +* `#29413 `__: TST: Avoid uninitialized values in test (#29341) +* `#29414 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29415 `__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) +* `#29416 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... +* `#29417 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392) +* `#29424 `__: MAINT: use a stable pypy release in CI +* `#29425 `__: MAINT: Support python 314rc1 +* `#29429 `__: MAINT: Update highway to match main. +* `#29430 `__: BLD: use github to build macos-arm64 wheels with OpenBLAS and... +* `#29437 `__: BUG: fix datetime/timedelta hash memory leak (#29411) + + From 7c00e3ea28a556a9d6fcc69baac67d7896fbc0e1 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 25 Jul 2025 08:02:18 -0600 Subject: [PATCH 242/294] MAINT: Add Python 3.14 to classifier. [skip azp] [skip cirrus] [skip actions] --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index b0e58705ebd1..f9cfeadee599 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,6 +29,7 @@ classifiers = [ 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', + 'Programming Language :: Python :: 3.14', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Software Development', From c1bd73c9066db667d7b9138e1d3cb0c8db413559 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 25 Jul 2025 09:09:12 -0600 Subject: [PATCH 243/294] DOC: Update RELEASE_WALKTHROUGH.rst Update the release documentation to reflect current practice. 
[skip azp] [skip cirrus] [skip actions] --- doc/RELEASE_WALKTHROUGH.rst | 67 +++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 36 deletions(-) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 6d2194b5c4e6..c80c7d0463eb 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -26,18 +26,16 @@ Prior to release Add/drop Python versions ------------------------ -When adding or dropping Python versions, three files need to be edited: +When adding or dropping Python versions, two files need to be edited: - .github/workflows/wheels.yml # for github cibuildwheel -- tools/ci/cirrus_wheels.yml # for cibuildwheel aarch64/arm64 builds - pyproject.toml # for classifier and minimum version check. Make these changes in an ordinary PR against main and backport if necessary. Add ``[wheel build]`` at the end of the title line of the commit summary so that wheel builds will be run to test the changes. We currently release wheels for new Python versions after the first Python rc once manylinux and -cibuildwheel support it. For Python 3.11 we were able to release within a week -of the rc1 announcement. +cibuildwheel support it. Backport pull requests @@ -46,6 +44,7 @@ Backport pull requests Changes that have been marked for this release must be backported to the maintenance/2.1.x branch. + Update 2.1.0 milestones ----------------------- @@ -79,7 +78,8 @@ commit message might be something like:: Set the release version ----------------------- -Check the ``pyproject.toml`` file and set the release version if needed:: +Check the ``pyproject.toml`` file and set the release version and update the +classifier if needed:: $ gvim pyproject.toml @@ -89,7 +89,7 @@ Check the ``pavement.py`` and ``doc/source/release.rst`` files Check that the ``pavement.py`` file points to the correct release notes. It should have been updated after the last release, but if not, fix it now. 
Also make -sure that the notes have an entry in the ``release.rst`` file:: +sure that the release notes have an entry in the ``release.rst`` file:: $ gvim pavement.py doc/source/release.rst @@ -102,13 +102,12 @@ The changelog is generated using the changelog tool:: $ spin changelog $GITHUB v2.0.0..maintenance/2.1.x > doc/changelog/2.1.0-changelog.rst where ``GITHUB`` contains your GitHub access token. The text will need to be -checked for non-standard contributor names and dependabot entries removed. It -is also a good idea to remove any links that may be present in the PR titles -as they don't translate well to markdown, replace them with monospaced text. The -non-standard contributor names should be fixed by updating the ``.mailmap`` -file, which is a lot of work. It is best to make several trial runs before -reaching this point and ping the malefactors using a GitHub issue to get the -needed information. +checked for non-standard contributor names. It is also a good idea to remove +any links that may be present in the PR titles as they don't translate well to +markdown, replace them with monospaced text. The non-standard contributor names +should be fixed by updating the ``.mailmap`` file, which is a lot of work. It +is best to make several trial runs before reaching this point and ping the +malefactors using a GitHub issue to get the needed information. Finish the release notes @@ -170,26 +169,23 @@ If you need to delete the tag due to error:: --------------- Tagging the build at the beginning of this process will trigger a wheel build -via cibuildwheel and upload wheels and an sdist to the staging repo. The CI run -on github actions (for all x86-based and macOS arm64 wheels) takes about 1 1/4 -hours. The CI runs on cirrus (for aarch64 and M1) take less time. You can check -for uploaded files at the `staging repository`_, but note that it is not -closely synched with what you see of the running jobs. 
+via cibuildwheel and upload wheels and an sdist to the staging repo. All wheels +are currently built on GitHub actions and take about 1 1/4 hours to build. If you wish to manually trigger a wheel build, you can do so: -- On github actions -> `Wheel builder`_ there is a "Run workflow" button, click +- On GitHub actions -> `Wheel builder`_ there is a "Run workflow" button, click on it and choose the tag to build -- On Cirrus we don't currently have an easy way to manually trigger builds and - uploads. -If a wheel build fails for unrelated reasons, you can rerun it individually: +If some wheel builds fail for unrelated reasons, you can re-run them: + +- On GitHub actions select `Wheel builder`_ click on the task that contains + the build you want to re-run, it will have the tag as the branch. On the + upper right will be a re-run button, hit it and select "re-run failed" -- On github actions select `Wheel builder`_ click on the commit that contains - the build you want to rerun. On the left there is a list of wheel builds, - select the one you want to rerun and on the resulting page hit the - counterclockwise arrows button. -- On cirrus, log into cirrusci, look for the v2.1.0 tag and rerun the failed jobs. +If some wheels fail to upload to anaconda, you can select those builds in the +`Wheel builder`_ and manually download the build artifact. This is a temporary +workaround, but sometimes the quickest way to get a release out. .. _`staging repository`: https://anaconda.org/multibuild-wheels-staging/numpy/files .. _`Wheel builder`: https://github.com/numpy/numpy/actions/workflows/wheels.yml @@ -218,20 +214,19 @@ file is updated for continued development:: 5. Upload to PyPI ----------------- -Upload to PyPI using ``twine``. A recent version of ``twine`` of is needed -after recent PyPI changes, version ``3.4.1`` was used here:: +Upload to PyPI using ``twine``:: $ cd ../numpy $ twine upload release/installers/*.whl $ twine upload release/installers/*.gz # Upload last. 
-If one of the commands breaks in the middle, you may need to selectively upload -the remaining files because PyPI does not allow the same file to be uploaded -twice. The source file should be uploaded last to avoid synchronization -problems that might occur if pip users access the files while this is in -process, causing pip to build from source rather than downloading a binary -wheel. PyPI only allows a single source distribution, here we have -chosen the zip archive. +The source file should be uploaded last to avoid synchronization problems that +might occur if pip users access the files while this is in process, causing pip +to build from source rather than downloading a binary wheel. PyPI only allows a +single source distribution, here we have chosen the gz version. If the +uploading breaks because of network related reasons, you can try re-running the +commands, possibly after a fix. Twine will now handle the error generated by +PyPI when the same file is uploaded twice. 6. Upload files to GitHub From 3203001d84ad387253697f1ba4af805093dc338e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 27 Jul 2025 15:39:25 -0600 Subject: [PATCH 244/294] MAINT: Replace pavement.py Replace `pavement.py` functionality with a `write_release.py` script. This gets rid of a paver dependency and simplifies the release process. 
- Add tools/write_release.py - Remove pavement.py [skip azp] [skip cirrus] [skip actions] --- doc/RELEASE_WALKTHROUGH.rst | 12 +- pavement.py | 184 -------------------------- requirements/release_requirements.txt | 3 - tools/write_release.py | 121 +++++++++++++++++ 4 files changed, 126 insertions(+), 194 deletions(-) delete mode 100644 pavement.py create mode 100644 tools/write_release.py diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index c80c7d0463eb..a3787ae95ee5 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -84,14 +84,12 @@ classifier if needed:: $ gvim pyproject.toml -Check the ``pavement.py`` and ``doc/source/release.rst`` files --------------------------------------------------------------- +Check the ``doc/source/release.rst`` file +----------------------------------------- -Check that the ``pavement.py`` file points to the correct release notes. It should -have been updated after the last release, but if not, fix it now. Also make -sure that the release notes have an entry in the ``release.rst`` file:: +make sure that the release notes have an entry in the ``release.rst`` file:: - $ gvim pavement.py doc/source/release.rst + $ gvim doc/source/release.rst Generate the changelog @@ -208,7 +206,7 @@ Anaconda staging directory using the ``tools/download-wheels.py`` script:: This needs to be done after all installers are downloaded, but before the pavement file is updated for continued development:: - $ paver write_release + $ python write_release 2.1.0 5. Upload to PyPI diff --git a/pavement.py b/pavement.py deleted file mode 100644 index 369b8703b0ba..000000000000 --- a/pavement.py +++ /dev/null @@ -1,184 +0,0 @@ -r""" -This paver file is intended to help with the release process as much as -possible. It relies on virtualenv to generate 'bootstrap' environments as -independent from the user system as possible (e.g. to make sure the sphinx doc -is built against the built numpy, not an installed one). 
- -Building changelog + notes -========================== - -Assumes you have git and the binaries/tarballs in installers/:: - - paver write_release - paver write_note - -This automatically put the checksum into README.rst, and writes the Changelog. - -TODO -==== - - the script is messy, lots of global variables - - make it more easily customizable (through command line args) - - missing targets: install & test, sdist test, debian packaging - - fix bdist_mpkg: we build the same source twice -> how to make sure we use - the same underlying python for egg install in venv and for bdist_mpkg -""" -import hashlib -import os -import textwrap - -# The paver package needs to be installed to run tasks -import paver -from paver.easy import Bunch, options, sh, task - -#----------------------------------- -# Things to be changed for a release -#----------------------------------- - -# Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.4.0-notes.rst' - - -#------------------------------------------------------- -# Hardcoded build/install dirs, virtualenv options, etc. -#------------------------------------------------------- - -# Where to put the release installers -options(installers=Bunch(releasedir="release", - installersdir=os.path.join("release", "installers")),) - - -#------------- -# README stuff -#------------- - -def _compute_hash(idirs, hashfunc): - """Hash files using given hashfunc. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - hashfunc : hash function - Function to be used to hash the files. - - """ - released = paver.path.path(idirs).listdir() - checksums = [] - for fpath in sorted(released): - with open(fpath, 'rb') as fin: - fhash = hashfunc(fin.read()) - checksums.append( - f'{fhash.hexdigest()} {os.path.basename(fpath)}') - return checksums - - -def compute_md5(idirs): - """Compute md5 hash of files in idirs. 
- - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - return _compute_hash(idirs, hashlib.md5) - - -def compute_sha256(idirs): - """Compute sha256 hash of files in idirs. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - # better checksum so gpg signed README.rst containing the sums can be used - # to verify the binaries instead of signing all binaries - return _compute_hash(idirs, hashlib.sha256) - - -def write_release_task(options, filename='README'): - """Append hashes of release files to release notes. - - This appends file hashes to the release notes and creates - four README files of the result in various formats: - - - README.rst - - README.rst.gpg - - README.md - - README.md.gpg - - The md file are created using `pandoc` so that the links are - properly updated. The gpg files are kept separate, so that - the unsigned files may be edited before signing if needed. - - Parameters - ---------- - options : - Set by ``task`` decorator. - filename : str - Filename of the modified notes. The file is written - in the release directory. 
- - """ - idirs = options.installers.installersdir - notes = paver.path.path(RELEASE_NOTES) - rst_readme = paver.path.path(filename + '.rst') - md_readme = paver.path.path(filename + '.md') - - # append hashes - with open(rst_readme, 'w') as freadme: - with open(notes) as fnotes: - freadme.write(fnotes.read()) - - freadme.writelines(textwrap.dedent( - """ - Checksums - ========= - - MD5 - --- - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_md5(idirs)]) - - freadme.writelines(textwrap.dedent( - """ - SHA256 - ------ - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_sha256(idirs)]) - - # generate md file using pandoc before signing - sh(f"pandoc -s -o {md_readme} {rst_readme}") - - # Sign files - if hasattr(options, 'gpg_key'): - cmd = f'gpg --clearsign --armor --default_key {options.gpg_key}' - else: - cmd = 'gpg --clearsign --armor' - - sh(cmd + f' --output {rst_readme}.gpg {rst_readme}') - sh(cmd + f' --output {md_readme}.gpg {md_readme}') - - -@task -def write_release(options): - """Write the README files. - - Two README files are generated from the release notes, one in ``rst`` - markup for the general release, the other in ``md`` markup for the github - release notes. - - Parameters - ---------- - options : - Set by ``task`` decorator. 
- - """ - rdir = options.installers.releasedir - write_release_task(options, os.path.join(rdir, 'README')) diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index d23e69fa1fa8..eaa092560d2d 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -12,8 +12,5 @@ gitpython>=3.1.30 # uploading wheels twine -# building and notes -Paver - # uploading release documentation packaging diff --git a/tools/write_release.py b/tools/write_release.py new file mode 100644 index 000000000000..f86113acde8e --- /dev/null +++ b/tools/write_release.py @@ -0,0 +1,121 @@ +""" +Standalone script for writing release doc:: + + python tools/write_release + +Example:: + + python tools/write_release.py 1.7.0 + +Needs to be run from the root of the repository and assumes +that the output is in `release` and wheels and sdist in +`release/installers`. + +Translation from rst to md markdown requires Pandoc, you +will need to rely on your distribution to provide that. + +""" +import os +import subprocess +import textwrap +import argparse +from hashlib import md5 +from hashlib import sha256 +from pathlib import Path + +# Name of the notes directory +NOTES_DIR = "doc/source/release" +# Name of the output directory +OUTPUT_DIR = "release" +# Output base name, `.rst` or `.md` will be appended +OUTPUT_FILE = "README" + +def compute_hash(wheel_dir, hash_func): + """ + Compute hashes of files in wheel_dir. + + Parameters + ---------- + wheel_dir: str + Path to wheel directory from repo root. + hash_func: function + Hash function, i.e., md5, sha256, etc. + + Returns + ------- + list_of_strings: list + List of of strings. Each string is the hash + followed by the file basename. 
+ + """ + released = os.listdir(wheel_dir) + checksums = [] + for fn in sorted(released): + fn_path = Path(f"{wheel_dir}/{fn}") + with open(fn_path, 'rb') as f: + m = hash_func(f.read()) + checksums.append(f"{m.hexdigest()} {fn}") + return checksums + + +def write_release(version): + """ + Copy the -notes.rst file to the OUTPUT_DIR, append + the md5 and sha256 hashes of the wheels and sdist, and produce + README.rst and README.md files. + + Parameters + ---------- + version: str + Release version, e.g., '2.3.2', etc. + + Returns + ------- + None. + + """ + notes = Path(f"{NOTES_DIR}/{version}-notes.rst") + wheel_dir = Path(f"{OUTPUT_DIR}/installers") + target_md = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.md") + target_rst = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.rst") + + os.system(f"cp {notes} {target_rst}") + + with open(str(target_rst), 'a') as f: + f.writelines(textwrap.dedent( + """ + Checksums + ========= + + MD5 + --- + :: + + """)) + f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, md5)]) + + f.writelines(textwrap.dedent( + """ + SHA256 + ------ + :: + + """)) + f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, sha256)]) + + # translate README.rst to md for posting on GitHub + rst_to_md = subprocess.Popen( + ["pandoc", "-s", "-o", f"{target_md}", f"{target_rst}", "--wrap=preserve"], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, error = rst_to_md.communicate() + if not rst_to_md.returncode == 0: + raise RuntimeError(f"{error} failed") + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + "version", + help="NumPy version of the release, e.g. 
2.3.2, etc.") + + args = parser.parse_args() + write_release(args.version) From d9acbceec552ecf6e737821b3c2385cfef590b77 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 28 Jul 2025 11:33:55 -0600 Subject: [PATCH 245/294] MAINT: Update tools/write_release.py Co-authored-by: Joren Hammudoglu --- tools/write_release.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/tools/write_release.py b/tools/write_release.py index f86113acde8e..5e976fe564f4 100644 --- a/tools/write_release.py +++ b/tools/write_release.py @@ -52,8 +52,7 @@ def compute_hash(wheel_dir, hash_func): checksums = [] for fn in sorted(released): fn_path = Path(f"{wheel_dir}/{fn}") - with open(fn_path, 'rb') as f: - m = hash_func(f.read()) + m = hash_func(fn_path.read_bytes()) checksums.append(f"{m.hexdigest()} {fn}") return checksums @@ -74,10 +73,10 @@ def write_release(version): None. """ - notes = Path(f"{NOTES_DIR}/{version}-notes.rst") - wheel_dir = Path(f"{OUTPUT_DIR}/installers") - target_md = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.md") - target_rst = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.rst") + notes = Path(NOTES_DIR) / f"{version}-notes.rst" + wheel_dir = Path(OUTPUT_DIR) / "installers" + target_md = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.md" + target_rst = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.rst" os.system(f"cp {notes} {target_rst}") @@ -104,12 +103,10 @@ def write_release(version): f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, sha256)]) # translate README.rst to md for posting on GitHub - rst_to_md = subprocess.Popen( - ["pandoc", "-s", "-o", f"{target_md}", f"{target_rst}", "--wrap=preserve"], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output, error = rst_to_md.communicate() - if not rst_to_md.returncode == 0: - raise RuntimeError(f"{error} failed") + subprocess.run( + ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"], + check=True, + ) if __name__ == '__main__': parser = argparse.ArgumentParser() From 
6bab2a5606094b47f96d9047caa67565793561ab Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 28 Jul 2025 11:57:44 -0600 Subject: [PATCH 246/294] MAINT: Fix linting errors. --- tools/write_release.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/write_release.py b/tools/write_release.py index 5e976fe564f4..7662eb7b1288 100644 --- a/tools/write_release.py +++ b/tools/write_release.py @@ -11,16 +11,15 @@ that the output is in `release` and wheels and sdist in `release/installers`. -Translation from rst to md markdown requires Pandoc, you +Translation from rst to md markdown requires Pandoc, you will need to rely on your distribution to provide that. """ +import argparse import os import subprocess import textwrap -import argparse -from hashlib import md5 -from hashlib import sha256 +from hashlib import md5, sha256 from pathlib import Path # Name of the notes directory @@ -51,7 +50,7 @@ def compute_hash(wheel_dir, hash_func): released = os.listdir(wheel_dir) checksums = [] for fn in sorted(released): - fn_path = Path(f"{wheel_dir}/{fn}") + fn_path = Path(f"{wheel_dir}/{fn}") m = hash_func(fn_path.read_bytes()) checksums.append(f"{m.hexdigest()} {fn}") return checksums @@ -77,7 +76,7 @@ def write_release(version): wheel_dir = Path(OUTPUT_DIR) / "installers" target_md = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.md" target_rst = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.rst" - + os.system(f"cp {notes} {target_rst}") with open(str(target_rst), 'a') as f: @@ -107,7 +106,8 @@ def write_release(version): ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"], check=True, ) - + + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( From 5591f1109b45a482bdaea3d3e44a80212a188edd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Mon, 28 Jul 2025 21:33:18 +0200 Subject: [PATCH 247/294] BLD: provide explicit control over cpu-baseline detection MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new `cpu-baseline-detect` feature flag that can be used to more precisely control the use of CPU baseline detection. This can be used by packages to more precisely control used SIMD code independently of compiler flags specified. The option follows typical feature semantics -- with `auto` preserving the current behavior of enabling when relevant compiler flags are found, `enabled` forcing it on based on the implicit compiler defaults, and `disabled` forcing it off. Signed-off-by: Michał Górny --- meson.options | 2 ++ meson_cpu/meson.build | 22 ++++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/meson.options b/meson.options index b09992fe9b91..f17f9901664a 100644 --- a/meson.options +++ b/meson.options @@ -28,6 +28,8 @@ option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') option('cpu-baseline', type: 'string', value: 'min', description: 'Minimal set of required CPU features') +option('cpu-baseline-detect', type: 'feature', value: 'auto', + description: 'Detect CPU baseline from the compiler flags') option('cpu-dispatch', type: 'string', value: 'max -xop -fma4', description: 'Dispatched set of additional CPU features') option('test-simd', type: 'array', diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index e5b6d0fbe7be..1c4c6eecb308 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -46,20 +46,22 @@ if get_option('disable-optimization') CPU_CONF_BASELINE = 'none' CPU_CONF_DISPATCH = 'none' else - baseline_detect = false + baseline_detect = get_option('cpu-baseline-detect').enabled() c_args = get_option('c_args') - foreach arg : c_args - foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] - if arg.contains(carch) - message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') - baseline_detect = true + if 
get_option('cpu-baseline-detect').auto() + foreach arg : c_args + foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] + if arg.contains(carch) + message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') + baseline_detect = true + break + endif + endforeach + if baseline_detect break endif endforeach - if baseline_detect - break - endif - endforeach + endif # The required minimal set of required CPU features. CPU_CONF_BASELINE = get_option('cpu-baseline') if baseline_detect From bb7dd1ba604ba6c1766c6ca72fc21d378609f485 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 29 Jul 2025 11:57:28 +0100 Subject: [PATCH 248/294] TYP: Add test which hits `np.array` constructor overload with `object: _SupportsArray[_ArrayT]` (#29428) Co-authored-by: Joren Hammudoglu --- numpy/typing/tests/data/reveal/array_constructors.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 766547e54b60..99073ac4543a 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,7 +1,7 @@ import sys from collections import deque from pathlib import Path -from typing import Any, TypeVar, assert_type +from typing import Any, Generic, TypeVar, assert_type import numpy as np import numpy.typing as npt @@ -10,12 +10,16 @@ _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) class SubClass(npt.NDArray[_ScalarT_co]): ... +class IntoSubClass(Generic[_ScalarT_co]): + def __array__(self) -> SubClass[_ScalarT_co]: ... 
+ i8: np.int64 A: npt.NDArray[np.float64] B: SubClass[np.float64] C: list[int] D: SubClass[np.float64 | np.int64] +E: IntoSubClass[np.float64 | np.int64] mixed_shape: tuple[int, np.int64] @@ -39,6 +43,7 @@ assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +assert_type(np.array(E, subok=True), SubClass[np.float64 | np.int64]) # https://github.com/numpy/numpy/issues/29245 assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) From c6471c59bbf1cb95b50eae91773ccc2f5fe1436e Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 29 Jul 2025 15:44:00 +0100 Subject: [PATCH 249/294] TYP: Type ``MaskedArray.flat`` (#29466) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 4 ++-- numpy/typing/tests/data/pass/ma.py | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 0a039831a7d4..09edffc889a5 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -528,9 +528,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def baseclass(self) -> type[NDArray[Any]]: ... data: Any @property - def flat(self): ... + def flat(self) -> MaskedIterator: ... @flat.setter - def flat(self, value): ... + def flat(self, value: ArrayLike, /) -> None: ... @property def fill_value(self): ... 
@fill_value.setter diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index b9be2b2e4384..793f7097ee81 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -34,6 +34,8 @@ MAR_f.mask = AR_b MAR_f.mask = np.False_ +MAR_f.flat = [9] + # Inplace addition MAR_b += AR_LIKE_b diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index f2b7aa215c7a..4f9d971759e6 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -431,6 +431,8 @@ assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) +assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) + def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] From bb31564192e4789b94bd7cc4c703b2beb8ce0806 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Jul 2025 12:24:57 -0600 Subject: [PATCH 250/294] MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 (#29471) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.0 to 3.1.2. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/ffd835cef18fa11522f608fc0fa973b89f5ddc87...9e4e50bd76b3190f55304387e333f6234823ea9b) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index dbfb0ad48960..5c1b35653d68 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 + - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ca41042e1ad3..3fd37bb60ee5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 + uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From d621a3162e7a14e986e2dee29bbf9ff05f7b728d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 00:49:32 +0100 Subject: [PATCH 251/294] TYP: Type ``MaskedArray.recordmask`` (#29467) --- numpy/ma/core.pyi | 6 +++--- numpy/typing/tests/data/fail/ma.pyi | 5 +++++ numpy/typing/tests/data/reveal/ma.pyi | 3 +++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 09edffc889a5..764138048e1b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -509,13 +509,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... 
@property - def mask(self) -> NDArray[MaskType] | MaskType: ... + def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @mask.setter def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property - def recordmask(self): ... + def recordmask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @recordmask.setter - def recordmask(self, mask): ... + def recordmask(self, mask: Never, /) -> NoReturn: ... def harden_mask(self) -> Self: ... def soften_mask(self) -> Self: ... @property diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 160d30f885e3..9354084fead0 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -17,6 +17,11 @@ AR_b: npt.NDArray[np.bool] MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] MAR_1d_f8.dtype = np.bool # type: ignore[assignment] +def invalid_recordmask_setter() -> None: + # We make an inner function for this one to avoid the + # `NoReturn` causing an early exit for type checkers. 
+ MAR_1d_f8.recordmask = [True] # type: ignore[assignment] + np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 4f9d971759e6..d708eeed45e2 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -374,6 +374,9 @@ assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) assert_type(MAR_i8.hardmask, bool) assert_type(MAR_i8.sharedmask, bool) +assert_type(MAR_i8.recordmask, np.ma.MaskType | NDArray[np.ma.MaskType]) +assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], np.dtype[np.ma.MaskType]]) + assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) From e4aee500a2bf497457efd825b7d578257c0e04f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:30:15 +0100 Subject: [PATCH 252/294] TYP: Type ``MaskedArray.__new__`` (#29457) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 63 +++++++++++++- numpy/typing/tests/data/fail/ma.pyi | 5 ++ numpy/typing/tests/data/reveal/ma.pyi | 114 +++++++++++++++----------- 3 files changed, 132 insertions(+), 50 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 764138048e1b..824ceef18822 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -455,7 +455,68 @@ class MaskedIterator: class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Any - def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... 
+ @overload + def __new__( + cls, + data: _ArrayLike[_ScalarT], + mask: _ArrayLikeBool_co = nomask, + dtype: None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + *, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[Any]: ... + def __array_finalize__(self, obj): ... def __array_wrap__(self, obj, context=..., return_scalar=...): ... 
diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 9354084fead0..ed973462e2d4 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -148,3 +148,8 @@ MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] + +np.ma.MaskedArray(np.array([1,2,3]), keep_mask='yes') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), order='Corinthian') # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index d708eeed45e2..3edd300c0b5a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,14 +1,20 @@ -from typing import Any, Literal, TypeAlias, TypeVar, assert_type, NoReturn +from typing import Any, Generic, Literal, NoReturn, TypeAlias, TypeVar, assert_type import numpy as np from numpy import dtype, generic from numpy._typing import NDArray, _AnyShape _ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -class MaskedArraySubclass(MaskedArray[np.complex128]): ... +class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... + +class IntoMaskedArraySubClass(Generic[_ScalarT_co]): + def __array__(self) -> MaskedArraySubclass[_ScalarT_co]: ... 
+ +MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] @@ -43,7 +49,8 @@ MAR_V: MaskedArray[np.void] MAR_floating: MaskedArray[np.floating] MAR_number: MaskedArray[np.number] -MAR_subclass: MaskedArraySubclass +MAR_subclass: MaskedArraySubclassC +MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] @@ -66,9 +73,9 @@ assert_type(np.ma.min(MAR_b, axis=0), Any) assert_type(np.ma.min(MAR_f4, axis=0), Any) assert_type(np.ma.min(MAR_b, keepdims=True), Any) assert_type(np.ma.min(MAR_f4, keepdims=True), Any) -assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.min(), np.bool) assert_type(MAR_f4.min(), np.float32) @@ -76,9 +83,9 @@ assert_type(MAR_b.min(axis=0), Any) assert_type(MAR_f4.min(axis=0), Any) assert_type(MAR_b.min(keepdims=True), Any) assert_type(MAR_f4.min(keepdims=True), Any) -assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.max(MAR_b), np.bool) assert_type(np.ma.max(MAR_f4), np.float32) @@ -86,9 +93,9 @@ assert_type(np.ma.max(MAR_b, axis=0), Any) assert_type(np.ma.max(MAR_f4, axis=0), Any) assert_type(np.ma.max(MAR_b, keepdims=True), Any) 
assert_type(np.ma.max(MAR_f4, keepdims=True), Any) -assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.max(), np.bool) assert_type(MAR_f4.max(), np.float32) @@ -96,9 +103,9 @@ assert_type(MAR_b.max(axis=0), Any) assert_type(MAR_f4.max(axis=0), Any) assert_type(MAR_b.max(keepdims=True), Any) assert_type(MAR_f4.max(keepdims=True), Any) -assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.ptp(MAR_b), np.bool) assert_type(np.ma.ptp(MAR_f4), np.float32) @@ -106,9 +113,9 @@ assert_type(np.ma.ptp(MAR_b, axis=0), Any) assert_type(np.ma.ptp(MAR_f4, axis=0), Any) assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) -assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.ptp(), np.bool) assert_type(MAR_f4.ptp(), np.float32) @@ -116,9 +123,9 @@ assert_type(MAR_b.ptp(axis=0), Any) 
assert_type(MAR_f4.ptp(axis=0), Any) assert_type(MAR_b.ptp(keepdims=True), Any) assert_type(MAR_f4.ptp(keepdims=True), Any) -assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmin(), np.intp) assert_type(MAR_f4.argmin(), np.intp) @@ -126,8 +133,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmin(axis=0), Any) assert_type(MAR_f4.argmin(axis=0), Any) assert_type(MAR_b.argmin(keepdims=True), Any) -assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.argmin(MAR_b), np.intp) assert_type(np.ma.argmin(MAR_f4), np.intp) @@ -135,8 +142,8 @@ assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmin(MAR_b, axis=0), Any) assert_type(np.ma.argmin(MAR_f4, axis=0), Any) assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmax(), np.intp) assert_type(MAR_f4.argmax(), np.intp) @@ -144,8 +151,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmax(axis=0), Any) assert_type(MAR_f4.argmax(axis=0), Any) 
assert_type(MAR_b.argmax(keepdims=True), Any) -assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.argmax(MAR_b), np.intp) assert_type(np.ma.argmax(MAR_f4), np.intp) @@ -153,8 +160,8 @@ assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmax(MAR_b, axis=0), Any) assert_type(np.ma.argmax(MAR_f4, axis=0), Any) assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.all(), np.bool) assert_type(MAR_f4.all(), np.bool) @@ -164,8 +171,8 @@ assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) -assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.any(), np.bool) assert_type(MAR_f4.any(), np.bool) @@ -175,22 +182,22 @@ assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) -assert_type(MAR_f4.any(out=MAR_subclass), 
MaskedArraySubclass) -assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f4.sort(), None) assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) -assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) assert_type(MAR_f8.take(0), np.float64) assert_type(MAR_1d.take(0), Any) assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) -assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.take(f, 0), Any) assert_type(np.ma.take(f4, 0), np.float32) @@ -199,8 +206,8 @@ assert_type(np.ma.take(AR_f4, 0), np.float32) assert_type(np.ma.take(MAR_1d, 0), Any) assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) -assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.take([1], [0]), MaskedArray[Any]) assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) @@ -391,17 +398,17 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) assert_type(MAR_2d_f4.dot([1]), 
MaskedArray[Any]) -assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) assert_type(MAR_f8.trace(), Any) -assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclass) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclassC) assert_type(MAR_f8.round(), MaskedArray[np.float64]) -assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) @@ -411,10 +418,10 @@ assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) assert_type(MAR_f8.cumprod(), MaskedArray[Any]) -assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) -assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.view(), MaskedArray[np.float64]) assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) @@ -439,6 +446,15 @@ assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] +assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) +assert_type(np.ma.MaskedArray(np.array([1,2,3]), [True, True, False], np.float16), MaskedArray[np.float16]) 
+assert_type(np.ma.MaskedArray(np.array([1,2,3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1,2,3]), copy=True), MaskedArray[Any]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` +assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` +assert_type(np.ma.MaskedArray(MAR_into_subclass), MaskedArray[np.float32]) + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) @@ -780,27 +796,27 @@ assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] assert_type(MAR_f8.sum(), Any) assert_type(MAR_f8.sum(axis=0), Any) assert_type(MAR_f8.sum(keepdims=True), Any) -assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.std(), Any) assert_type(MAR_f8.std(axis=0), Any) assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) -assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.var(), Any) assert_type(MAR_f8.var(axis=0), Any) assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) -assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.mean(), Any) assert_type(MAR_f8.mean(axis=0), Any) assert_type(MAR_f8.mean(keepdims=True), Any) -assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.prod(), Any) assert_type(MAR_f8.prod(axis=0), Any) assert_type(MAR_f8.prod(keepdims=True), Any) -assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclassC) # MaskedArray "true" division From 75169a9cba27bd28bb19c36e8d0cff6a49f35bf0 
Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:34:37 +0100 Subject: [PATCH 253/294] TYP: Remove ``MaskedArray.__reduce__``, and punt on ``MaskedArray.__{eq,ne}__`` (#29470) --- numpy/ma/core.pyi | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 824ceef18822..79503caa7821 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -601,8 +601,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... def compress(self, condition, axis=..., out=...): ... - def __eq__(self, other): ... - def __ne__(self, other): ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Incomplete, /) -> Incomplete: ... + def __ne__(self, other: Incomplete, /) -> Incomplete: ... + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] @@ -1902,7 +1906,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... # - def __reduce__(self): ... def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... 
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` From 00bbfefed97140b3c493b9154d4db62dfe2cf787 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:36:48 +0100 Subject: [PATCH 254/294] TYP: Type ``MaskedArray.__getitem__`` (#29472) --- numpy/__init__.pyi | 1 + numpy/ma/core.pyi | 16 +++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 12 ++++++++++++ 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 302f0cdf15fc..643327f29d1b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2106,6 +2106,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, ) -> ndarray[_ShapeT, _DTypeT]: ... + # Keep in sync with `MaskedArray.__getitem__` @overload def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 79503caa7821..51038178ee6f 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -25,6 +25,7 @@ from numpy import ( _OrderKACF, _PartitionKind, _SortKind, + _ToIndices, amax, amin, bool_, @@ -54,6 +55,7 @@ from numpy import ( str_, timedelta64, unsignedinteger, + void, ) from numpy._globals import _NoValueType from numpy._typing import ( @@ -290,6 +292,7 @@ _MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] _MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] _MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | bool_] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] MaskType = bool_ @@ -562,7 +565,18 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): fill_value: _ScalarLike_co | None = None ) -> MaskedArray[_ShapeT_co, dtype]: ... - def __getitem__(self, indx): ... 
+ # Keep in sync with `ndarray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: str, /) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... + def __setitem__(self, indx, value): ... @property def shape(self) -> _ShapeT_co: ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 3edd300c0b5a..344a9ef70160 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -18,6 +18,7 @@ MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] +AR_i8: NDArray[np.int64] AR_u4: NDArray[np.uint32] AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] @@ -54,6 +55,7 @@ MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] +MAR_2d_V: np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]] b: np.bool f4: np.float32 @@ -365,6 +367,16 @@ assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) +assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[0, 0], Any) +assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[..., -1], 
MaskedArray[np.float32]) +assert_type(MAR_2d_V['field_0'], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[['field_0', 'field_1']], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) + assert_type(np.ma.nomask, np.bool[Literal[False]]) assert_type(np.ma.MaskType, type[np.bool]) From d53fe4aa45f44f52835e4b1946434b1a4acc76c8 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 15:44:04 +0100 Subject: [PATCH 255/294] TYP: Type ``MaskedArray.fill_value`` (#29468) --- numpy/ma/core.pyi | 4 ++-- numpy/typing/tests/data/pass/ma.py | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 51038178ee6f..d79b4695bb32 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -607,9 +607,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @flat.setter def flat(self, value: ArrayLike, /) -> None: ... @property - def fill_value(self): ... + def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... @fill_value.setter - def fill_value(self, value=...): ... + def fill_value(self, value: _ScalarLike_co | None = None) -> None: ... get_fill_value: Any set_fill_value: Any def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 793f7097ee81..02655d7f88c7 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -34,6 +34,8 @@ MAR_f.mask = AR_b MAR_f.mask = np.False_ +MAR_i.fill_value = 0 + MAR_f.flat = [9] # Inplace addition diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 344a9ef70160..52c0dfa6ab75 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -401,6 +401,8 @@ assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[in assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_2d_f4.anom(0, 'float16'), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_i8.fill_value, np.int64) + assert_type(MAR_b.transpose(), MaskedArray[np.bool]) assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) From 97c6b8602791e266b0e073914e25121df0bb4936 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 30 Jul 2025 08:44:48 -0700 Subject: [PATCH 256/294] Add .file entry to all .s SVML files --- numpy/_core/src/umath/svml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/svml b/numpy/_core/src/umath/svml index 32bf2a984207..3a713b130183 160000 --- a/numpy/_core/src/umath/svml +++ b/numpy/_core/src/umath/svml @@ -1 +1 @@ -Subproject commit 32bf2a98420762a63ab418aaa0a7d6e17eb9627a +Subproject commit 3a713b13018325451c1b939d3914ceff5ec68e19 From 19574af8decf1285cd4a5ca10af8fe81fd30f6bc Mon Sep 17 00:00:00 2001 From: Bernard Roesler Date: Wed, 30 Jul 2025 12:17:09 -0400 Subject: [PATCH 257/294] BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) * BUG: Always return a real dtype from linalg.cond. Addresses gh-18304. 
The condition number of a matrix is the product of two norms, which are always non-negative and real-valued, so the condition number itself should be non-negative and real-valued. This commit returns the proper real dtype from `linalg.cond`, and includes tests for the condition number of a complex matrix in various norms. * ENH: Change type of complex results only. This commit addresses a reviewer comment on the blanket application of `abs(r)`. It specifically ensures the return type of complex-valued matrices will be the corresponding real type. --- numpy/linalg/_linalg.py | 1 + numpy/linalg/tests/test_linalg.py | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 52a2ffb8f50b..7471db7e1fe2 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2011,6 +2011,7 @@ def cond(x, p=None): # contain nans in the entries where inversion failed. _assert_stacked_square(x) t, result_t = _commonType(x) + result_t = _realType(result_t) # condition number is always real signature = 'D->D' if isComplexType(t) else 'd->d' with errstate(all='ignore'): invx = _umath_linalg.inv(x, signature=signature) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 8ad1c3ed6d16..4cc9ac7a5496 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -793,15 +793,28 @@ def do(self, a, b, tags): class TestCond(CondCases): - def test_basic_nonsvd(self): + @pytest.mark.parametrize('is_complex', [False, True]) + def test_basic_nonsvd(self, is_complex): # Smoketest the non-svd norms A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + if is_complex: + # Since A is linearly scaled, the condition number should not change + A = A * (1 + 1j) assert_almost_equal(linalg.cond(A, inf), 4) assert_almost_equal(linalg.cond(A, -inf), 2 / 3) assert_almost_equal(linalg.cond(A, 1), 4) assert_almost_equal(linalg.cond(A, -1), 0.5) 
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + @pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 'fro', np.inf, -np.inf]) + def test_cond_dtypes(self, dtype, norm_ord): + # Check that the condition number is computed in the same dtype + # as the input matrix + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype) + out_type = get_real_dtype(dtype) + assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type) + def test_singular(self): # Singular matrices have infinite condition number for # positive norms, and negative norms shouldn't raise From b4586aea61e94589e3c62e1ac6d2adc05e5a40f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Jul 2025 18:02:32 +0000 Subject: [PATCH 258/294] MAINT: Bump github/codeql-action from 3.29.4 to 3.29.5 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.4 to 3.29.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4e828ff8d448a8a6e532957b1811f387a63867e8...51f77329afa6477de8c49fc9c7046c15b9a4e79d) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1aea33f531f4..2d5fdb08a9d4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 + uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 84fbe1f03cb1..079813c3aea3 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v2.1.27 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v2.1.27 with: sarif_file: results.sarif From 46268caa719db95ab2198bcd619f0e3120d7c52e Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Thu, 31 Jul 2025 05:09:16 +0900 Subject: [PATCH 259/294] =?UTF-8?q?DOC:=20Clarify=20that=20`numpy.printopt?= =?UTF-8?q?ions`=20applies=20only=20to=20`ndarray`,=20not=E2=80=A6=20(#294?= =?UTF-8?q?50)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * DOC: Clarify that `numpy.printoptions` applies only to `ndarray`, not scalars As discussed in #29409, numpy.printoptions applies only to `numpy.ndarray`, not to scalars. A Note describing this behavior has been added to the docstring. * Update numpy/_core/arrayprint.py [skip cirrus] [skip actions] [skip azp] --------- Co-authored-by: Nathan Goldbaum --- numpy/_core/arrayprint.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 2a684280610b..9eda8db40dfc 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -250,9 +250,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, Notes ----- - `formatter` is always reset with a call to `set_printoptions`. - Use `printoptions` as a context manager to set the values temporarily. + * ``formatter`` is always reset with a call to `set_printoptions`. + * Use `printoptions` as a context manager to set the values temporarily. + * These print options apply only to NumPy ndarrays, not to scalars. Examples -------- @@ -352,6 +353,10 @@ def get_printoptions(): For a full description of these options, see `set_printoptions`. + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. 
+ See Also -------- set_printoptions, printoptions @@ -410,6 +415,10 @@ def printoptions(*args, **kwargs): -------- set_printoptions, get_printoptions + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. + """ token = _set_printoptions(*args, **kwargs) From 4126291bc7062cbe636edf810b006653b693f78d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 09:59:40 +0100 Subject: [PATCH 260/294] MAINT: remove unnecessary `kwargs` update in `MaskedArray.reshape` (#29403) --- numpy/ma/core.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 84e029439725..9b705460dd85 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -4818,7 +4818,6 @@ def reshape(self, *s, **kwargs): fill_value=999999) """ - kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask From a80542541130a2dc47eab2a6972019453cc0c518 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 31 Jul 2025 11:03:20 +0200 Subject: [PATCH 261/294] MAINT: Replace setting of array shape by reshape operation (#29314) This PR we replace some explicit setting of the shape of an array in favor of using reshape. Some cases are left out (e.g. masked arrays, tests to check the .shape setting works). This is because explicit shape setting is generally unsafe mutation that we would like to avoid/get rid of. 
--- numpy/_core/numeric.py | 5 +- numpy/_core/tests/test_einsum.py | 70 ++++++++++++--------------- numpy/_core/tests/test_multiarray.py | 22 ++++----- numpy/lib/_format_impl.py | 4 +- numpy/lib/_function_base_impl.py | 5 +- numpy/lib/tests/test_arrayterator.py | 3 +- numpy/linalg/_linalg.py | 9 ++-- numpy/linalg/tests/test_linalg.py | 9 ++-- numpy/linalg/tests/test_regression.py | 2 +- numpy/ma/tests/test_core.py | 23 +++++---- numpy/ma/tests/test_extras.py | 2 +- tools/swig/test/testFlat.py | 6 +-- 12 files changed, 74 insertions(+), 86 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 4886468f0864..ec2cbf6dd7fc 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1106,10 +1106,9 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) + >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) + >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], [3, 4]], diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 84d4af1707b6..82789eae0679 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -231,21 +231,20 @@ def __rmul__(self, other): def test_einsum_views(self): # pass-through for do_opt in [True, False]: - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum(a, [Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum("ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) # output is writeable whenever input is writeable @@ -256,115 +255,110 @@ def test_einsum_views(self): 
assert_(not b.flags['WRITEABLE']) # transpose - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("ji", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) # diagonal - a = np.arange(9) - a.shape = (3, 3) + a = np.arange(9).reshape((3, 3)) b = np.einsum("ii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("...ii->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, 
i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("iii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) + a = np.arange(24).reshape((2, 3, 4)) b = np.einsum("ijk->jik", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, 
a.swapaxes(0, 1)) def check_einsum_sums(self, dtype, do_opt=False): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index e7647d2e23fe..cf2f899b7991 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2279,8 +2279,7 @@ def test_sort_axis(self): def test_sort_size_0(self): # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array sort with axis={axis}' assert_equal(np.sort(a, axis=axis), a, msg) @@ -2541,8 +2540,7 @@ def test_argsort(self): assert_equal(a.copy().argsort(), c) # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array argsort with axis={axis}' assert_equal(np.argsort(a, axis=axis), @@ -2859,8 +2857,7 @@ def test_partition_integer(self): def test_partition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array partition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), a, msg) @@ -2871,8 +2868,7 @@ def test_partition_empty_array(self, kth_dtype): def test_argpartition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array argpartition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), @@ -5367,7 +5363,7 @@ def test_ip_types(self): unchecked_types = [bytes, str, np.void] x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = 
x.reshape((2, 3, 4)) for types in np._core.sctypes.values(): for T in types: if T not in unchecked_types: @@ -5378,20 +5374,20 @@ def test_ip_types(self): def test_raise(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) @@ -5971,7 +5967,7 @@ class TestFlat: def setup_method(self): a0 = np.arange(20.0) a = a0.reshape(4, 5) - a0.shape = (4, 5) + a0 = a0.reshape((4, 5)) a.flags.writeable = False self.a = a self.b = a[::2, ::2] diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index 7378ba554810..bfda7fec73b6 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -879,10 +879,10 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, ) if fortran_order: - array.shape = shape[::-1] + array = array.reshape(shape[::-1]) array = array.transpose() else: - array.shape = shape + array = array.reshape(shape) return array diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f5690a829fc6..05823ca3741f 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1317,7 +1317,10 @@ def gradient(f, *varargs, axis=None, edge_order=1): # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 - a.shape = b.shape = c.shape = shape + + a = a.reshape(shape) + b = 
b.reshape(shape) + c = c.reshape(shape) # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + c * f[tuple(slice4)] diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py index 800c9a2a5f77..42a85e58ff62 100644 --- a/numpy/lib/tests/test_arrayterator.py +++ b/numpy/lib/tests/test_arrayterator.py @@ -14,8 +14,7 @@ def test(): ndims = randint(5) + 1 shape = tuple(randint(10) + 1 for dim in range(ndims)) els = reduce(mul, shape) - a = np.arange(els) - a.shape = shape + a = np.arange(els).reshape(shape) buf_size = randint(2 * els) b = Arrayterator(a, buf_size) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 7471db7e1fe2..7e2f1ebf531b 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -317,8 +317,7 @@ def tensorsolve(a, b, axes=None): Examples -------- >>> import numpy as np - >>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) + >>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4)) >>> rng = np.random.default_rng() >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) @@ -495,8 +494,7 @@ def tensorinv(a, ind=2): Examples -------- >>> import numpy as np - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) + >>> a = np.eye(4*6).reshape((4, 6, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) @@ -505,8 +503,7 @@ def tensorinv(a, ind=2): >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True - >>> a = np.eye(4*6) - >>> a.shape = (24, 8, 3) + >>> a = np.eye(4*6).reshape((24, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 4cc9ac7a5496..b3744024fd88 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -2218,8 +2218,7 @@ def test_non_square_handling(self, arr, ind): ((24, 8, 3), 1), ]) def test_tensorinv_shape(self, 
shape, ind): - a = np.eye(24) - a.shape = shape + a = np.eye(24).reshape(shape) ainv = linalg.tensorinv(a=a, ind=ind) expected = a.shape[ind:] + a.shape[:ind] actual = ainv.shape @@ -2229,15 +2228,13 @@ def test_tensorinv_shape(self, shape, ind): 0, -2, ]) def test_tensorinv_ind_limit(self, ind): - a = np.eye(24) - a.shape = (4, 6, 8, 3) + a = np.eye(24).reshape((4, 6, 8, 3)) with assert_raises(ValueError): linalg.tensorinv(a=a, ind=ind) def test_tensorinv_result(self): # mimic a docstring example - a = np.eye(24) - a.shape = (24, 8, 3) + a = np.eye(24).reshape((24, 8, 3)) ainv = linalg.tensorinv(a, ind=1) b = np.ones(24) assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index c46f83adb0af..e02e955cfa40 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -33,7 +33,7 @@ def test_eig_build(self): 1.51971555e-15 + 0.j, -1.51308713e-15 + 0.j]) a = arange(13 * 13, dtype=float64) - a.shape = (13, 13) + a = a.reshape((13, 13)) a = a % 17 va, ve = linalg.eig(a) va.sort() diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 2704857308a3..5327963999e0 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -216,11 +216,11 @@ def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. 
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) @@ -246,7 +246,12 @@ def test_concatenate_alongaxis(self): (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # Concatenation along an axis s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) + assert_equal(xm.mask, np.reshape(m1, s)) assert_equal(ym.mask, np.reshape(m2, s)) xmym = concatenate((xm, ym), 1) @@ -762,8 +767,7 @@ def test_pickling_wstructured(self): def test_pickling_keepalignment(self): # Tests pickling w/ F_CONTIGUOUS arrays - a = arange(10) - a.shape = (-1, 2) + a = arange(10).reshape( (-1, 2)) b = a.T for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): test = pickle.loads(pickle.dumps(b, protocol=proto)) @@ -1176,8 +1180,7 @@ def test_basic_arithmetic(self): assert_equal(np.divide(x, y), divide(xm, ym)) def test_divide_on_different_shapes(self): - x = arange(6, dtype=float) - x.shape = (2, 3) + x = arange(6, dtype=float).reshape((2, 3)) y = arange(3, dtype=float) z = x / y diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 34a032087536..f9deb33fc2c5 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1077,7 +1077,7 @@ def test_3d(self): x = np.ma.arange(24).reshape(3, 4, 2) x[x % 3 == 0] = masked assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) + x = x.reshape((4, 3, 2)) assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) x = np.ma.arange(24).reshape(4, 3, 2) x[x % 5 == 0] = masked diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py index ce6f74819e86..543ee0c41d9f 100755 --- a/tools/swig/test/testFlat.py +++ 
b/tools/swig/test/testFlat.py @@ -43,7 +43,7 @@ def testProcess3D(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) y = x.copy() process(y) self.assertEqual(np.all((x + 1) == y), True) @@ -56,7 +56,7 @@ def testProcess3DTranspose(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) y = x.copy() process(y.T) self.assertEqual(np.all((x.T + 1) == y.T), True) @@ -69,7 +69,7 @@ def testProcessNoncontiguous(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) self.assertRaises(TypeError, process, x[:, :, 0]) From bac0a8e844a6cf18335177e6203e0e8a92cddeca Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Thu, 31 Jul 2025 09:52:27 -0400 Subject: [PATCH 262/294] DOC: Add 'See Also' refs for sign, copysign and signbit. (#29489) [skip azp] [skip cirrus] [skip actions] --- numpy/_core/code_generators/ufunc_docstrings.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index ddae87bd6012..e976be723287 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3792,6 +3792,11 @@ def add_newdoc(place, name, doc): The sign of `x`. $OUT_SCALAR_1 + See Also + -------- + signbit + copysign + Notes ----- There is more than one definition of sign in common use for complex @@ -3828,6 +3833,11 @@ def add_newdoc(place, name, doc): Output array, or reference to `out` if that was supplied. 
$OUT_SCALAR_1 + See Also + -------- + sign + copysign + Examples -------- >>> import numpy as np @@ -3859,6 +3869,11 @@ def add_newdoc(place, name, doc): The values of `x1` with the sign of `x2`. $OUT_SCALAR_2 + See Also + -------- + sign + signbit + Examples -------- >>> import numpy as np From 4ab0c51c5203ef58dfae19ecb90ef3f6ae79126a Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:23:11 +0100 Subject: [PATCH 263/294] MAINT: Do not exclude `typing/tests/data` from ruff (#29479) --- numpy/typing/tests/data/fail/arithmetic.pyi | 2 +- numpy/typing/tests/data/fail/bitwise_ops.pyi | 2 +- numpy/typing/tests/data/fail/chararray.pyi | 1 + numpy/typing/tests/data/fail/comparisons.pyi | 8 ++-- numpy/typing/tests/data/fail/datasource.pyi | 1 + numpy/typing/tests/data/fail/fromnumeric.pyi | 6 +-- numpy/typing/tests/data/fail/ma.pyi | 34 +++++++------- .../tests/data/fail/nested_sequence.pyi | 1 + numpy/typing/tests/data/fail/npyio.pyi | 2 +- numpy/typing/tests/data/fail/scalars.pyi | 1 - numpy/typing/tests/data/fail/shape.pyi | 1 + numpy/typing/tests/data/fail/twodim_base.pyi | 2 +- numpy/typing/tests/data/fail/type_check.pyi | 1 - .../tests/data/misc/extended_precision.pyi | 4 +- numpy/typing/tests/data/pass/arithmetic.py | 6 ++- .../tests/data/pass/array_constructors.py | 1 + numpy/typing/tests/data/pass/array_like.py | 2 +- numpy/typing/tests/data/pass/arrayterator.py | 1 + numpy/typing/tests/data/pass/bitwise_ops.py | 2 +- numpy/typing/tests/data/pass/comparisons.py | 9 ++-- numpy/typing/tests/data/pass/index_tricks.py | 2 + numpy/typing/tests/data/pass/literal.py | 3 +- numpy/typing/tests/data/pass/mod.py | 2 +- numpy/typing/tests/data/pass/ndarray_misc.py | 4 +- numpy/typing/tests/data/pass/numeric.py | 2 + numpy/typing/tests/data/pass/random.py | 1 + numpy/typing/tests/data/pass/recfunctions.py | 18 ++++---- numpy/typing/tests/data/pass/scalars.py | 1 + numpy/typing/tests/data/pass/shape.py 
| 2 +- numpy/typing/tests/data/pass/simple.py | 2 +- numpy/typing/tests/data/pass/ufunclike.py | 2 + numpy/typing/tests/data/reveal/arithmetic.pyi | 3 +- numpy/typing/tests/data/reveal/arrayprint.pyi | 2 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 3 +- .../typing/tests/data/reveal/comparisons.pyi | 8 ++-- .../typing/tests/data/reveal/fromnumeric.pyi | 1 - numpy/typing/tests/data/reveal/ma.pyi | 44 +++++++++---------- numpy/typing/tests/data/reveal/mod.pyi | 3 +- .../tests/data/reveal/ndarray_conversion.pyi | 1 - .../typing/tests/data/reveal/ndarray_misc.pyi | 3 +- .../tests/data/reveal/polynomial_polybase.pyi | 3 +- .../data/reveal/polynomial_polyutils.pyi | 3 +- .../tests/data/reveal/polynomial_series.pyi | 2 +- .../typing/tests/data/reveal/ufunc_config.pyi | 3 +- numpy/typing/tests/data/reveal/ufunclike.pyi | 2 +- ruff.toml | 3 +- 46 files changed, 110 insertions(+), 100 deletions(-) diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index e696083b8614..d62906ae87d9 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -30,7 +30,7 @@ AR_LIKE_M: list[np.datetime64] # NOTE: mypys `NoReturn` errors are, unfortunately, not that great _1 = AR_b - AR_LIKE_b # type: ignore[var-annotated] _2 = AR_LIKE_b - AR_b # type: ignore[var-annotated] -AR_i - bytes() # type: ignore[operator] +AR_i - b"" # type: ignore[operator] AR_f - AR_LIKE_m # type: ignore[operator] AR_f - AR_LIKE_M # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 3538ec7d64c7..29dfe79287ad 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -4,7 +4,7 @@ i8 = np.int64() i4 = np.int32() u8 = np.uint64() b_ = np.bool() -i = int() +i = 0 f8 = np.float64() diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi index 
fb52f7349dd1..f1b7009439d1 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -1,4 +1,5 @@ from typing import Any + import numpy as np AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] diff --git a/numpy/typing/tests/data/fail/comparisons.pyi b/numpy/typing/tests/data/fail/comparisons.pyi index 3c8a94bff240..d2965b5c1a91 100644 --- a/numpy/typing/tests/data/fail/comparisons.pyi +++ b/numpy/typing/tests/data/fail/comparisons.pyi @@ -21,7 +21,7 @@ AR_M > AR_i # type: ignore[operator] AR_M > AR_f # type: ignore[operator] AR_M > AR_m # type: ignore[operator] -AR_i > str() # type: ignore[operator] -AR_i > bytes() # type: ignore[operator] -str() > AR_M # type: ignore[operator] -bytes() > AR_M # type: ignore[operator] +AR_i > "" # type: ignore[operator] +AR_i > b"" # type: ignore[operator] +"" > AR_M # type: ignore[operator] +b"" > AR_M # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/datasource.pyi b/numpy/typing/tests/data/fail/datasource.pyi index 267b672baea7..4c603cf693a1 100644 --- a/numpy/typing/tests/data/fail/datasource.pyi +++ b/numpy/typing/tests/data/fail/datasource.pyi @@ -1,4 +1,5 @@ from pathlib import Path + import numpy as np path: Path diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index 51ef26810e21..d0a81f0bfbfe 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -32,18 +32,18 @@ np.swapaxes(A, 1, [0]) # type: ignore[call-overload] np.transpose(A, axes=1.0) # type: ignore[call-overload] np.partition(a, None) # type: ignore[call-overload] -np.partition(a, 0, axis="bob") # type: ignore[call-overload] +np.partition(a, 0, axis="bob") # type: ignore[call-overload] np.partition(A, 0, kind="bob") # type: ignore[call-overload] np.partition(A, 0, order=range(5)) # type: ignore[arg-type] np.argpartition(a, None) # type: ignore[arg-type] np.argpartition(a, 0, 
axis="bob") # type: ignore[arg-type] -np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] np.sort(A, axis="bob") # type: ignore[call-overload] np.sort(A, kind="bob") # type: ignore[call-overload] -np.sort(A, order=range(5)) # type: ignore[arg-type] +np.sort(A, order=range(5)) # type: ignore[arg-type] np.argsort(A, axis="bob") # type: ignore[arg-type] np.argsort(A, kind="bob") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index ed973462e2d4..b5921660d617 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -82,7 +82,7 @@ MAR_1d_f8.any(axis=1.0) # type: ignore[call-overload] MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] -MAR_1d_f8.sort(axis=(0,1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=(0, 1)) # type: ignore[arg-type] MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] MAR_1d_f8.sort(kind='cabbage') # type: ignore[arg-type] MAR_1d_f8.sort(order=lambda: 'cabbage') # type: ignore[arg-type] @@ -101,13 +101,13 @@ np.ma.take(out=1) # type: ignore[call-overload] np.ma.take(mode="bob") # type: ignore[call-overload] MAR_1d_f8.partition(['cabbage']) # type: ignore[arg-type] -MAR_1d_f8.partition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(axis=(0, 1)) # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(kind='cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] MAR_1d_f8.argpartition(['cabbage']) # type: ignore[arg-type] -MAR_1d_f8.argpartition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(kind='cabbage') # type: ignore[arg-type, 
call-arg] MAR_1d_f8.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] @@ -116,10 +116,10 @@ np.ma.ndim(lambda: 'lambda') # type: ignore[arg-type] np.ma.size(AR_b, axis='0') # type: ignore[arg-type] -MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] @@ -129,15 +129,15 @@ MAR_1d_f8.put(4, 999, mode='flip') # type: ignore[arg-type] np.ma.put(MAR_1d_f8, 4, 999, mode='flip') # type: ignore[arg-type] -np.ma.put([1,1,3], 0, 999) # type: ignore[arg-type] +np.ma.put([1, 1, 3], 0, 999) # type: ignore[arg-type] np.ma.compressed(lambda: 'compress me') # type: ignore[call-overload] -np.ma.allequal(MAR_1d_f8, [1,2,3], fill_value=1.5) # type: ignore[arg-type] +np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5) # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], masked_equal=4.5) # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], rtol='.4') # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], atol='.5') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol='.4') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol='.5') # type: ignore[arg-type] MAR_1d_f8.__setmask__('mask') # type: ignore[arg-type] @@ -147,9 +147,9 @@ MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] -MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] +MAR_1d_f8.argsort(axis=(1, 0)) # 
type: ignore[arg-type] -np.ma.MaskedArray(np.array([1,2,3]), keep_mask='yes') # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), subok=None) # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), ndim=None) # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), order='Corinthian') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask='yes') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), order='Corinthian') # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/nested_sequence.pyi b/numpy/typing/tests/data/fail/nested_sequence.pyi index a28d3df3c749..1004a36accc7 100644 --- a/numpy/typing/tests/data/fail/nested_sequence.pyi +++ b/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -1,4 +1,5 @@ from collections.abc import Sequence + from numpy._typing import _NestedSequence a: Sequence[float] diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi index e204566a5877..d57ebe3a2e3e 100644 --- a/numpy/typing/tests/data/fail/npyio.pyi +++ b/numpy/typing/tests/data/fail/npyio.pyi @@ -1,8 +1,8 @@ import pathlib from typing import IO -import numpy.typing as npt import numpy as np +import numpy.typing as npt str_path: str bytes_path: bytes diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index bfbe9125e529..0560a855a80f 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -1,4 +1,3 @@ -import sys import numpy as np f2: np.float16 diff --git a/numpy/typing/tests/data/fail/shape.pyi b/numpy/typing/tests/data/fail/shape.pyi index fea055583073..a024d7bda273 100644 --- a/numpy/typing/tests/data/fail/shape.pyi +++ b/numpy/typing/tests/data/fail/shape.pyi @@ -1,4 +1,5 @@ from 
typing import Any + import numpy as np # test bounds of _ShapeT_co diff --git a/numpy/typing/tests/data/fail/twodim_base.pyi b/numpy/typing/tests/data/fail/twodim_base.pyi index d0f2b7ad8322..e146d68c7418 100644 --- a/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,4 +1,4 @@ -from typing import Any, TypeVar +from typing import Any import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/fail/type_check.pyi b/numpy/typing/tests/data/fail/type_check.pyi index 94b6ee425af5..8b68e996304c 100644 --- a/numpy/typing/tests/data/fail/type_check.pyi +++ b/numpy/typing/tests/data/fail/type_check.pyi @@ -1,5 +1,4 @@ import numpy as np -import numpy.typing as npt DTYPE_i8: np.dtype[np.int64] diff --git a/numpy/typing/tests/data/misc/extended_precision.pyi b/numpy/typing/tests/data/misc/extended_precision.pyi index 84b5f516bdde..7978faf4d5bd 100644 --- a/numpy/typing/tests/data/misc/extended_precision.pyi +++ b/numpy/typing/tests/data/misc/extended_precision.pyi @@ -1,8 +1,8 @@ +from typing import assert_type + import numpy as np from numpy._typing import _96Bit, _128Bit -from typing import assert_type - assert_type(np.float96(), np.floating[_96Bit]) assert_type(np.float128(), np.floating[_128Bit]) assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 3b2901cf2b51..62c978a2fc51 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,9 +1,11 @@ from __future__ import annotations from typing import Any, cast + +import pytest + import numpy as np import numpy.typing as npt -import pytest c16 = np.complex128(1) f8 = np.float64(1) @@ -23,7 +25,7 @@ b = bool(1) c = complex(1) f = float(1) -i = int(1) +i = 1 class Object: diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py 
index 17b6fab93ad8..d91d257cb17c 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -3,6 +3,7 @@ import numpy as np import numpy.typing as npt + class Index: def __index__(self) -> int: return 0 diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index 264ec55da053..f922beae34ce 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -5,7 +5,7 @@ import numpy as np if TYPE_CHECKING: - from numpy._typing import NDArray, ArrayLike, _SupportsArray + from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 diff --git a/numpy/typing/tests/data/pass/arrayterator.py b/numpy/typing/tests/data/pass/arrayterator.py index 572be5e2fe29..a99c09a25231 100644 --- a/numpy/typing/tests/data/pass/arrayterator.py +++ b/numpy/typing/tests/data/pass/arrayterator.py @@ -2,6 +2,7 @@ from __future__ import annotations from typing import Any + import numpy as np AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) diff --git a/numpy/typing/tests/data/pass/bitwise_ops.py b/numpy/typing/tests/data/pass/bitwise_ops.py index 22a245d21809..2d4815b0d940 100644 --- a/numpy/typing/tests/data/pass/bitwise_ops.py +++ b/numpy/typing/tests/data/pass/bitwise_ops.py @@ -9,7 +9,7 @@ b_ = np.bool(1) b = bool(1) -i = int(1) +i = 1 AR = np.array([0, 1, 2], dtype=np.int32) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index a461d8b660da..b2e52762c7a8 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import cast, Any +from typing import Any, cast + import numpy as np c16 = np.complex128() @@ -18,10 +19,10 @@ b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 
+i = 0 SEQ = (0, 1, 2, 3, 4) diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py index dfc4ff2f314a..ea98156a8f2e 100644 --- a/numpy/typing/tests/data/pass/index_tricks.py +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np AR_LIKE_b = [[True, True], [True, True]] diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index c8fa476210e3..f1e0cb2a69d3 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -1,9 +1,10 @@ from __future__ import annotations -from typing import Any, TYPE_CHECKING from functools import partial +from typing import TYPE_CHECKING, Any import pytest + import numpy as np if TYPE_CHECKING: diff --git a/numpy/typing/tests/data/pass/mod.py b/numpy/typing/tests/data/pass/mod.py index 2b7e6cd85c73..464326486fa2 100644 --- a/numpy/typing/tests/data/pass/mod.py +++ b/numpy/typing/tests/data/pass/mod.py @@ -13,7 +13,7 @@ b = bool(1) f = float(1) -i = int(1) +i = 1 AR = np.array([1], dtype=np.bool) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index b3428f6b48c1..30ad270edd6e 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -9,14 +9,16 @@ from __future__ import annotations import operator -from typing import cast, Any +from typing import Any, cast import numpy as np import numpy.typing as npt + class SubClass(npt.NDArray[np.float64]): ... class IntSubClass(npt.NDArray[np.intp]): ... 
+ i4 = np.int32(1) A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) B0 = np.empty((), dtype=np.int32).view(SubClass) diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 1eb14cf3a2a2..4c1ffa0b2776 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -6,11 +6,13 @@ """ from __future__ import annotations + from typing import cast import numpy as np import numpy.typing as npt + class SubClass(npt.NDArray[np.float64]): ... diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index bce204a7378e..fd07e378e553 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1,6 +1,7 @@ from __future__ import annotations from typing import Any + import numpy as np SEED_NONE = None diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py index 52a3d78a7622..cca0c3988708 100644 --- a/numpy/typing/tests/data/pass/recfunctions.py +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -12,7 +12,7 @@ def test_recursive_fill_fields() -> None: [(1, 10.0), (2, 20.0)], dtype=[("A", np.int64), ("B", np.float64)], ) - b = np.zeros((int(3),), dtype=a.dtype) + b = np.zeros((3,), dtype=a.dtype) out = rfn.recursive_fill_fields(a, b) assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) @@ -51,8 +51,8 @@ def test_get_fieldstructure() -> None: def test_merge_arrays() -> None: assert_type( rfn.merge_arrays(( - np.ones((int(2),), np.int_), - np.ones((int(3),), np.float64), + np.ones((2,), np.int_), + np.ones((3,), np.float64), )), np.recarray[tuple[int], np.dtype[np.void]], ) @@ -60,7 +60,7 @@ def test_merge_arrays() -> None: def test_drop_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.drop_fields(a, "a"), @@ 
-78,7 +78,7 @@ def test_drop_fields() -> None: def test_rename_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), @@ -92,7 +92,7 @@ def test_repack_fields() -> None: assert_type(rfn.repack_fields(dt), np.dtype[np.void]) assert_type(rfn.repack_fields(dt.type(0)), np.void) assert_type( - rfn.repack_fields(np.ones((int(3),), dtype=dt)), + rfn.repack_fields(np.ones((3,), dtype=dt)), np.ndarray[tuple[int], np.dtype[np.void]], ) @@ -133,14 +133,14 @@ def test_require_fields() -> None: def test_stack_arrays() -> None: - x = np.zeros((int(2),), np.int32) + x = np.zeros((2,), np.int32) assert_type( rfn.stack_arrays(x), np.ndarray[tuple[int], np.dtype[np.int32]], ) - z = np.ones((int(2),), [("A", "|S3"), ("B", float)]) - zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + z = np.ones((2,), [("A", "|S3"), ("B", float)]) + zz = np.ones((2,), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) assert_type( rfn.stack_arrays((z, zz)), np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]], diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 655903a50bce..eeb707b255e1 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -1,6 +1,7 @@ import datetime as dt import pytest + import numpy as np b = np.bool() diff --git a/numpy/typing/tests/data/pass/shape.py b/numpy/typing/tests/data/pass/shape.py index 286c8a81dacf..e3b497bc0310 100644 --- a/numpy/typing/tests/data/pass/shape.py +++ b/numpy/typing/tests/data/pass/shape.py @@ -1,4 +1,4 @@ -from typing import Any, NamedTuple, cast +from typing import Any, NamedTuple import numpy as np diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 408d2482b0ef..003e9ee58bb1 100644 --- 
a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -1,9 +1,9 @@ """Simple expression that should pass with mypy.""" import operator +from collections.abc import Iterable import numpy as np import numpy.typing as npt -from collections.abc import Iterable # Basic checks array = np.array([1, 2]) diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index f993939ddba1..c02a68cc062c 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 5dd78a197b8f..81a98a9d96d2 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -64,7 +64,6 @@ AR_LIKE_m: list[np.timedelta64] AR_LIKE_M: list[np.datetime64] AR_LIKE_O: list[np.object_] - # Array subtraction assert_type(AR_number - AR_number, npt.NDArray[np.number]) @@ -556,7 +555,7 @@ assert_type(f16 + f8, np.floating[_128Bit] | np.float64) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) assert_type(f4 + f8, np.float32 | np.float64) -assert_type(i4 + f8,np.float64) +assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) assert_type(c + f8, np.complex128 | np.float64) diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index 3b339edced32..de4077654bea 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -1,6 +1,6 @@ import contextlib from collections.abc import Callable -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi 
b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 6c6b56197546..bef7c6739605 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,5 +1,4 @@ -from typing import Any, TypeAlias, assert_type -from typing import Literal as L +from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 2165d17fce34..6df5a3d94314 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -1,6 +1,6 @@ import decimal import fractions -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt @@ -20,10 +20,10 @@ td = np.timedelta64(0, "D") b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 AR = np.array([0], dtype=np.int64) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index a2ba83e6c4c8..f6cc9b9fe0d7 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,7 +1,6 @@ """Tests for :mod:`_core.fromnumeric`.""" from typing import Any, assert_type -from typing import Literal as L import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 52c0dfa6ab75..40caad61f24c 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -286,7 +286,7 @@ assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) assert_type(MAR_byte.count(), int) assert_type(MAR_f4.count(axis=None), int) assert_type(MAR_f4.count(axis=0), NDArray[np.int_]) -assert_type(MAR_b.count(axis=(0,1)), NDArray[np.int_]) +assert_type(MAR_b.count(axis=(0, 1)), NDArray[np.int_]) 
assert_type(MAR_o.count(keepdims=True), NDArray[np.int_]) assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_]) assert_type(MAR_o.count(None, True), NDArray[np.int_]) @@ -294,7 +294,7 @@ assert_type(MAR_o.count(None, True), NDArray[np.int_]) assert_type(np.ma.count(MAR_byte), int) assert_type(np.ma.count(MAR_byte, axis=None), int) assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_]) -assert_type(np.ma.count(MAR_b, axis=(0,1)), NDArray[np.int_]) +assert_type(np.ma.count(MAR_b, axis=(0, 1)), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) @@ -302,13 +302,13 @@ assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(np.ma.compressed([[1,2,3]]), np.ndarray[tuple[int], np.dtype]) +assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) -assert_type(MAR_f4.put([0,4,8], [10,20,30]), None) +assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) assert_type(MAR_f4.put(4, 999), None) assert_type(MAR_f4.put(4, 999, mode='clip'), None) -assert_type(np.ma.put(MAR_f4, [0,4,8], [10,20,30]), None) +assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) assert_type(np.ma.put(MAR_f4, 4, 999), None) assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) @@ -320,7 +320,7 @@ assert_type(MAR_i8.filled(), NDArray[np.int64]) assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) assert_type(np.ma.filled(MAR_f4, float('nan')), NDArray[np.float32]) -assert_type(np.ma.filled([[1,2,3]]), NDArray[Any]) +assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) # PyRight detects this one correctly, but mypy doesn't. 
# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] @@ -345,7 +345,7 @@ assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) # `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | np.bool) -assert_type(np.ma.getmask([1,2]), NDArray[np.bool] | np.bool) +assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | np.bool) assert_type(np.ma.getmask(np.int64(1)), np.bool) assert_type(np.ma.is_mask(MAR_1d), bool) @@ -458,12 +458,12 @@ assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedAr assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) def invalid_resize() -> None: - assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] + assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), [True, True, False], np.float16), MaskedArray[np.float16]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), dtype=np.float16), MaskedArray[np.float16]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), copy=True), MaskedArray[Any]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), [True, True, False], np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), copy=True), MaskedArray[Any]) # TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) # TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` @@ -1017,12 +1017,12 
@@ assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) assert_type(MAR_u4 ** AR_LIKE_o, Any) -assert_type(AR_LIKE_b ** MAR_u4 , MaskedArray[np.uint32]) -assert_type(AR_LIKE_u ** MAR_u4 , MaskedArray[np.unsignedinteger]) -assert_type(AR_LIKE_i ** MAR_u4 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_f ** MAR_u4 , MaskedArray[np.floating]) -assert_type(AR_LIKE_c ** MAR_u4 , MaskedArray[np.complexfloating]) -assert_type(AR_LIKE_o ** MAR_u4 , Any) +assert_type(AR_LIKE_b ** MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4, Any) assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) @@ -1032,11 +1032,11 @@ assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) assert_type(MAR_i8 ** AR_LIKE_o, Any) assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) -assert_type(AR_LIKE_u ** MAR_i8 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_i ** MAR_i8 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_f ** MAR_i8 , MaskedArray[np.floating]) -assert_type(AR_LIKE_c ** MAR_i8 , MaskedArray[np.complexfloating]) -assert_type(AR_LIKE_o ** MAR_i8 , Any) +assert_type(AR_LIKE_u ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8, Any) assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 
59a6a1016479..9bbbd5c52d7f 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,6 +1,5 @@ import datetime as dt -from typing import Literal as L -from typing import assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index bbd42573a774..5cb9b98029dd 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -37,7 +37,6 @@ any_sctype: np.ndarray[Any, Any] assert_type(any_dtype.tolist(), Any) assert_type(any_sctype.tolist(), Any) - # itemset does not return a value # tobytes is pretty simple # tofile does not return a value diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index b219f4e5bec2..2972a58c328f 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,12 +6,11 @@ function-based counterpart in `../from_numeric.py`. 
""" -from collections.abc import Iterator import ctypes as ct import operator +from collections.abc import Iterator from types import ModuleType from typing import Any, Literal, assert_type - from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index bb927035e40c..11265b92ff67 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,8 +1,7 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Any, LiteralString, TypeAlias, TypeVar, assert_type -from typing import Literal as L +from typing import Any, Literal as L, LiteralString, TypeAlias, TypeVar, assert_type import numpy as np import numpy.polynomial as npp diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 45522e72102f..07d6c9d1af65 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,8 +1,7 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Any, TypeAlias, assert_type -from typing import Literal as L +from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.polynomial.polyutils as pu diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 93f0799c818d..0f4a9e09f2e7 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import Any, TypeAlias, assert_type +from typing import TypeAlias, assert_type import numpy as np import numpy.polynomial as npp diff --git 
a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 748507530aa1..77c27eb3b4ca 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -1,10 +1,9 @@ """Typing tests for `_core._ufunc_config`.""" +from _typeshed import SupportsWrite from collections.abc import Callable from typing import Any, assert_type -from _typeshed import SupportsWrite - import numpy as np def func(a: str, b: int) -> None: ... diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index a0ede60e0158..aaae5e80e470 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt diff --git a/ruff.toml b/ruff.toml index 4666efdf39bc..b3cfea15b190 100644 --- a/ruff.toml +++ b/ruff.toml @@ -2,7 +2,6 @@ extend-exclude = [ "numpy/__config__.py", "numpy/distutils", "numpy/typing/_char_codes.py", - "numpy/typing/tests/data", "spin/cmds.py", # Submodules. 
"doc/source/_static/scipy-mathjax", @@ -96,6 +95,8 @@ ignore = [ "numpy/_typing/_array_like.py" = ["E501"] "numpy/_typing/_dtype_like.py" = ["E501"] "numpy*pyi" = ["E501"] +# "useless assignments" aren't so useless when you're testing that they don't make type checkers scream +"numpy/typing/tests/data/*" = ["B015", "B018", "E501"] "__init__.py" = ["F401", "F403", "F405"] "__init__.pyi" = ["F401"] From 3231ee85e69f222dad7c39b3e662d87d486a069c Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:24:56 +0100 Subject: [PATCH 264/294] TYP: Type ``MaskedArray.compress`` (#29480) --- numpy/ma/core.pyi | 31 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 5 +++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index d79b4695bb32..70088a44a000 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -614,7 +614,36 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): set_fill_value: Any def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... - def compress(self, condition, axis=..., out=...): ... + + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + *, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: None = None, + out: None = None + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + out: None = None + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? 
# xref numpy/numpy#17368 diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 40caad61f24c..441d39296ffa 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -301,6 +301,11 @@ assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False]), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False], axis=0), MaskedArray[np.float32]) +assert_type(MAR_f4.compress([True, False], axis=0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.compress([True, False], 0, MAR_subclass), MaskedArraySubclassC) + assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) From 0fbd49aad6d941008ccbb32c919186770b153483 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:25:36 +0100 Subject: [PATCH 265/294] TYP: Type ``MaskedArray.__setitem__`` (#29478) --- numpy/ma/core.pyi | 1 - numpy/typing/tests/data/pass/ma.py | 32 +++++++++++++++++++++--------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 70088a44a000..c1e98a61b4de 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -577,7 +577,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... - def __setitem__(self, indx, value): ... @property def shape(self) -> _ShapeT_co: ... 
@shape.setter diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 02655d7f88c7..2e2246fb88d2 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -1,3 +1,4 @@ +import datetime as dt from typing import Any, TypeAlias, TypeVar, cast import numpy as np @@ -15,11 +16,16 @@ MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) -MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], np.ma.MaskedArray(["a"], dtype="T")) +MAR_V: MaskedArray[np.void] = np.ma.MaskedArray( + [(1, 1)], + mask=[(False, False)], + dtype=[('a', int), ('b', int)] +) AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) @@ -38,6 +44,14 @@ MAR_f.flat = [9] +MAR_b[MAR_i > 0] = False +MAR_i[:] = 1 +MAR_f[[0]] = AR_LIKE_f +MAR_c[[0, 0]] = [3, 4 + 3j] +MAR_td64[0] = dt.timedelta(1) +MAR_dt64[0] = dt.datetime(2020, 1, 1) +MAR_V['a'] = [2] + # Inplace addition MAR_b += AR_LIKE_b @@ -65,10 +79,10 @@ MAR_td64 += AR_LIKE_u MAR_td64 += AR_LIKE_i MAR_td64 += AR_LIKE_m -MAR_M_dt64 += AR_LIKE_b -MAR_M_dt64 += AR_LIKE_u -MAR_M_dt64 += AR_LIKE_i -MAR_M_dt64 += AR_LIKE_m +MAR_dt64 += AR_LIKE_b +MAR_dt64 += AR_LIKE_u +MAR_dt64 += AR_LIKE_i +MAR_dt64 += AR_LIKE_m MAR_S += b'snakes' MAR_U += 'snakes' @@ -97,10 +111,10 @@ MAR_td64 -= AR_LIKE_u MAR_td64 -= AR_LIKE_i MAR_td64 -= AR_LIKE_m -MAR_M_dt64 -= AR_LIKE_b -MAR_M_dt64 -= AR_LIKE_u -MAR_M_dt64 -= AR_LIKE_i -MAR_M_dt64 -= AR_LIKE_m +MAR_dt64 -= AR_LIKE_b +MAR_dt64 -= AR_LIKE_u +MAR_dt64 -= AR_LIKE_i +MAR_dt64 -= AR_LIKE_m # 
Inplace floor division From 9da497f5c88a81669c954c3a9e328a15ce0a7220 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 1 Aug 2025 01:57:37 +0200 Subject: [PATCH 266/294] MAINT: bump `mypy` to `1.17.1` (#29493) --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 17de8d3eeb5e..dd7fa1a83e5b 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.16.1 + - mypy=1.17.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index e50919ef4f7b..b26a5f27bb05 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.16.1; platform_python_implementation != "PyPy" +mypy==1.17.1; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 60ad38e990073ee7bb6ca657139e67ea2bc8ad18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Aug 2025 17:13:36 +0000 Subject: [PATCH 267/294] MAINT: Bump pypa/cibuildwheel from 3.1.2 to 3.1.3 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.2 to 3.1.3. 
- [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/9e4e50bd76b3190f55304387e333f6234823ea9b...352e01339f0a173aa2a3eb57f01492e341e83865) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 5c1b35653d68..2b2ab586ba18 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 + - uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 3fd37bb60ee5..4fcd8a58f53a 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 + uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From f5a6af86acab7fcd1644fad76b5fbe466a0a98dd Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Sun, 3 Aug 2025 03:56:15 +0100 Subject: [PATCH 268/294] TYP: Type ``MaskedArray.__array_finalize__`` and ``MaskedArray.__array_wrap__`` (#29483) --- numpy/ma/core.pyi | 9 +++++++-- numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi 
index c1e98a61b4de..e99e0a527773 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -54,6 +54,7 @@ from numpy import ( signedinteger, str_, timedelta64, + ufunc, unsignedinteger, void, ) @@ -520,8 +521,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF | None = None, ) -> _MaskedArray[Any]: ... - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def __array_wrap__( + self, + obj: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> MaskedArray[_ShapeT, _DTypeT]: ... @overload # () def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 441d39296ffa..a09fb2d75997 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -313,6 +313,8 @@ assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) assert_type(MAR_f4.put(4, 999), None) assert_type(MAR_f4.put(4, 999, mode='clip'), None) +assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool]) + assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) assert_type(np.ma.put(MAR_f4, 4, 999), None) assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) From 4bb30632369548e1e91f37ecfd7eab4d9815b24f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Sun, 3 Aug 2025 05:07:01 +0200 Subject: [PATCH 269/294] Document `cpu-baeline-detect` --- doc/source/reference/simd/build-options.rst | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 2b7039136e75..1f105e27077d 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -17,6 +17,11 @@ that target certain CPU features: During the 
runtime, NumPy modules will fail to load if any of specified features are not supported by the target CPU (raises Python runtime error). +- ``cpu-baseline-detect``: controls detection of CPU baseline based on compiler + flags. Default value is ``auto`` that enables detection if ``-march=`` + or a similar compiler flag is used. The other possible values are ``enabled`` + and ``disabled`` to respective enable or disable it unconditionally. + - ``cpu-dispatch``: dispatched set of additional CPU features. Default value is ``max -xop -fma4`` which enables all CPU features, except for AMD legacy features (in case of X86). @@ -182,7 +187,7 @@ Behaviors - ``cpu-baseline`` will be treated as "native" if compiler native flag ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable - ``CFLAGS``:: + ``CFLAGS`` and ``cpu-baseline-detect`` is not ``disabled``:: export CFLAGS="-march=native" pip install . From ae772111208d2e0e536dac94830d13b13198b99e Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Tue, 5 Aug 2025 00:17:45 +0800 Subject: [PATCH 270/294] ENH: Use extern C in arraytypes.h.src file for cpp files (#29504) Signed-off-by: Wang Yang --- numpy/_core/src/multiarray/arraytypes.h.src | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/numpy/_core/src/multiarray/arraytypes.h.src b/numpy/_core/src/multiarray/arraytypes.h.src index a5613aa8dad6..ca8dbeaa67eb 100644 --- a/numpy/_core/src/multiarray/arraytypes.h.src +++ b/numpy/_core/src/multiarray/arraytypes.h.src @@ -1,6 +1,10 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ #define NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ +#ifdef __cplusplus +extern "C" { +#endif + #include "common.h" NPY_NO_EXPORT int @@ -165,4 +169,8 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT int BOOL_argmax, NPY_NO_EXPORT npy_intp count_nonzero_trivial_dispatcher(npy_intp count, const char* data, npy_intp stride, int dtype_num); +#ifdef __cplusplus +} +#endif + #endif /* 
NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ */ From 02d0964feeec4af27787c02242eff26961200705 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Mon, 4 Aug 2025 14:22:59 -0400 Subject: [PATCH 271/294] BUG: Casting from one timedelta64 to another didn't handle NAT. Closes gh-29497. --- numpy/_core/src/multiarray/datetime.c | 15 +++++++++------ numpy/_core/tests/test_datetime.py | 15 +++++++++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index d820474532ca..0dac36a0903b 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -3118,15 +3118,18 @@ cast_datetime_to_datetime(PyArray_DatetimeMetaData *src_meta, */ NPY_NO_EXPORT int cast_timedelta_to_timedelta(PyArray_DatetimeMetaData *src_meta, - PyArray_DatetimeMetaData *dst_meta, - npy_timedelta src_dt, - npy_timedelta *dst_dt) + PyArray_DatetimeMetaData *dst_meta, + npy_timedelta src_dt, + npy_timedelta *dst_dt) { npy_int64 num = 0, denom = 0; - /* If the metadata is the same, short-circuit the conversion */ - if (src_meta->base == dst_meta->base && - src_meta->num == dst_meta->num) { + /* + * If the metadata is the same or if src_dt is NAT, short-circuit + * the conversion. 
+ */ + if ((src_meta->base == dst_meta->base && src_meta->num == dst_meta->num) + || src_dt == NPY_DATETIME_NAT) { *dst_dt = src_dt; return 0; } diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 888bb7db293b..70b55c500f3d 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -844,6 +844,21 @@ def test_timedelta_array_str(self): a = np.array([-1, 'NaT', 1234567], dtype=' Date: Tue, 5 Aug 2025 13:27:04 +0200 Subject: [PATCH 272/294] BLD: update vendored Meson to 1.8.3 (#29509) --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index f754c4258805..e72c717199fa 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit f754c4258805056ed7be09830d96af45215d341b +Subproject commit e72c717199fa18d34020c7c97f9de3f388c5e055 From 287cf8f7edadc99999da64f8be053f55b9c5d63e Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Tue, 5 Aug 2025 22:05:55 +0900 Subject: [PATCH 273/294] DOC: Add narrative documentation for printing NumPy arrays (#29502) * DOC: Add narrative documentation for printing NumPy arrays Resolves #29476. This commit adds a narrative guide on how to print NumPy arrays to the "how-to" section. [skip cirrus] [skip actions] [skip azp] * DOC: apply suggested changes to narrative documentation [skip cirrus] [skip actions] [skip azp] --- doc/source/user/how-to-print.rst | 112 +++++++++++++++++++++++++++++++ doc/source/user/howtos_index.rst | 1 + 2 files changed, 113 insertions(+) create mode 100644 doc/source/user/how-to-print.rst diff --git a/doc/source/user/how-to-print.rst b/doc/source/user/how-to-print.rst new file mode 100644 index 000000000000..6195b6ed4c70 --- /dev/null +++ b/doc/source/user/how-to-print.rst @@ -0,0 +1,112 @@ +.. 
_how-to-print: + +======================= + Printing NumPy Arrays +======================= + + +This page explains how to control the formatting of printed NumPy arrays. +Note that these printing options apply only to arrays, not to scalars. + +Defining printing options +========================= + +Applying settings globally +-------------------------- + +Use :func:`numpy.set_printoptions` to change printing options for the entire runtime session. To inspect current print settings, use :func:`numpy.get_printoptions`: + + >>> np.set_printoptions(precision=2) + >>> np.get_printoptions() + {'edgeitems': 3, 'threshold': 1000, 'floatmode': 'maxprec', 'precision': 2, 'suppress': False, 'linewidth': 75, 'nanstr': 'nan', 'infstr': 'inf', 'sign': '-', 'formatter': None, 'legacy': False, 'override_repr': None} + +To restore the default settings, use: + + >>> np.set_printoptions(edgeitems=3, infstr='inf', + ... linewidth=75, nanstr='nan', precision=8, + ... suppress=False, threshold=1000, formatter=None) + + +Applying settings temporarily +----------------------------- + +Use :func:`numpy.printoptions` as a context manager to temporarily override print settings within a specific scope: + + + >>> arr = np.array([0.155, 0.184, 0.173]) + >>> with np.printoptions(precision=2): + ... print(arr) + [0.15 0.18 0.17] + + +All keywords that apply to :func:`numpy.set_printoptions` also apply to :func:`numpy.printoptions`. + + +Changing the number of digits of precision +========================================== + +The default number of fractional digits displayed is 8. You can change this number using ``precision`` keyword. + + >>> arr = np.array([0.1, 0.184, 0.17322]) + >>> with np.printoptions(precision=2): + ... print(arr) + [0.1 0.18 0.17] + + +The ``floatmode`` option determines how the ``precision`` setting is interpreted. 
+By default, ``floatmode=maxprec_equal`` displays values with the minimal number of digits needed to uniquely represent them, +using the same number of digits across all elements. +If you want to show exactly the same number of digits specified by ``precision``, use ``floatmode=fixed``: + + >>> arr = np.array([0.1, 0.184, 0.173], dtype=np.float32) + >>> with np.printoptions(precision=2, floatmode="fixed"): + ... print(arr) + [0.10 0.18 0.17] + + +Changing how `nan` and `inf` are displayed +========================================== + +By default, `numpy.nan` is displayed as `nan` and `numpy.inf` is displayed as `inf`. +You can override these representations using the ``nanstr`` and ``infstr`` options: + + >>> arr = np.array([np.inf, np.nan, 0]) + >>> with np.printoptions(nanstr="NAN", infstr="INF"): + ... print(arr) + [INF NAN 0.] + + +Controlling scientific notations +================================ + +By default, NumPy uses scientific notation when: + +- The absolute value of the smallest number is less than ``1e-4``, or +- The ratio of the largest to the smallest absolute value is greater than ``1e3`` + + >>> arr = np.array([0.00002, 210000.0, 3.14]) + >>> print(arr) + [2.00e-05 2.10e+05 3.14e+00] + +To suppress scientific notation and always use fixed-point notation, set ``suppress=True``: + + >>> arr = np.array([0.00002, 210000.0, 3.14]) + >>> with np.printoptions(suppress=True): + ... print(arr) + [ 0.00002 210000. 3.14 ] + + + +Applying custom formatting functions +==================================== + +You can apply custom formatting functions to specific or all data types using ``formatter`` keyword. +See :func:`numpy.set_printoptions` for more details on supported format keys. + +For example, to format `datetime64` values with a custom function: + + >>> arr = np.array([np.datetime64("2025-01-01"), np.datetime64("2024-01-01")]) + >>> with np.printoptions(formatter={"datetime":lambda x: f"(Year: {x.item().year}, Month: {x.item().month})"}): + ... 
print(arr) + [(Year: 2025, Month: 1) (Year: 2024, Month: 1)] + diff --git a/doc/source/user/howtos_index.rst b/doc/source/user/howtos_index.rst index ca30f7e9115d..a8a8229dd7dd 100644 --- a/doc/source/user/howtos_index.rst +++ b/doc/source/user/howtos_index.rst @@ -16,3 +16,4 @@ the package, see the :ref:`API reference `. how-to-index how-to-verify-bug how-to-partition + how-to-print From 6ad69259282d007bdfd15f12ea638b50b7e95c76 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 5 Aug 2025 10:44:27 -0600 Subject: [PATCH 274/294] TST: don't explicitly specify -j in TSAN build [skip cirrus] [skip azp] [skip circleci] --- .github/workflows/compiler_sanitizers.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 0581f7fc591b..a02f6f0c1609 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -89,7 +89,7 @@ jobs: run: pip uninstall -y pytest-xdist - name: Build NumPy with ThreadSanitizer - run: python -m spin build -j2 -- -Db_sanitize=thread + run: python -m spin build -- -Db_sanitize=thread - name: Run tests under prebuilt TSAN container run: | From d1a2ccc53b8c4ac13f1d10c6c1db4d5be1eecadd Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Tue, 5 Aug 2025 14:52:40 -0400 Subject: [PATCH 275/294] MAINT: Bump hypothesis to 6.137.1 (#29513) --- requirements/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index b26a5f27bb05..44f73a844591 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -2,7 +2,7 @@ Cython wheel==0.38.1 setuptools==65.5.1 ; python_version < '3.12' setuptools ; python_version >= '3.12' -hypothesis==6.104.1 +hypothesis==6.137.1 pytest==7.4.0 pytest-cov==4.1.0 meson From 
72574772c1cefe1ccdabc254994a4b45602f977f Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 6 Aug 2025 13:20:28 -0400 Subject: [PATCH 276/294] BUG: random: Fix handling of very small p in Generator.binomial. Use `log1p` to compute an expression of `1 - p` with more precision. To not change the legacy interface, a copy of the unaltered function `random_binomial_inversion` is used in the legacy distributions code. --- .../random/src/distributions/distributions.c | 2 +- .../random/src/legacy/legacy-distributions.c | 42 ++++++++++++++++++- numpy/random/tests/test_generator_mt19937.py | 18 ++++++++ 3 files changed, 59 insertions(+), 3 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index aa81a4a173d4..5ff694b2cde9 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -770,7 +770,7 @@ RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, RAND_INT_TYPE n, binomial->psave = p; binomial->has_binomial = 1; binomial->q = q = 1.0 - p; - binomial->r = qn = exp(n * log(q)); + binomial->r = qn = exp(n * log1p(-p)); binomial->c = np = n * p; binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); } else { diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index 14d9ce25f255..385c4c239a57 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -228,6 +228,44 @@ double legacy_exponential(aug_bitgen_t *aug_state, double scale) { return scale * legacy_standard_exponential(aug_state); } +static RAND_INT_TYPE legacy_random_binomial_inversion( + bitgen_t *bitgen_state, RAND_INT_TYPE n, double p, binomial_t *binomial +) +{ + double q, qn, np, px, U; + RAND_INT_TYPE X, bound; + + if (!(binomial->has_binomial) || (binomial->nsave != n) || + (binomial->psave != p)) { + binomial->nsave = n; + 
binomial->psave = p; + binomial->has_binomial = 1; + binomial->q = q = 1.0 - p; + binomial->r = qn = exp(n * log(q)); + binomial->c = np = n * p; + binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); + } else { + q = binomial->q; + qn = binomial->r; + np = binomial->c; + bound = binomial->m; + } + X = 0; + px = qn; + U = next_double(bitgen_state); + while (U > px) { + X++; + if (X > bound) { + X = 0; + px = qn; + U = next_double(bitgen_state); + } else { + U -= px; + px = ((n - X + 1) * p * px) / (X * q); + } + } + return X; +} static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, double p, @@ -237,14 +275,14 @@ static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, if (p <= 0.5) { if (p * n <= 30.0) { - return random_binomial_inversion(bitgen_state, n, p, binomial); + return legacy_random_binomial_inversion(bitgen_state, n, p, binomial); } else { return random_binomial_btpe(bitgen_state, n, p, binomial); } } else { q = 1.0 - p; if (q * n <= 30.0) { - return n - random_binomial_inversion(bitgen_state, n, q, binomial); + return n - legacy_random_binomial_inversion(bitgen_state, n, q, binomial); } else { return n - random_binomial_btpe(bitgen_state, n, q, binomial); } diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 50c232d4a8e7..c32612218fcf 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -99,6 +99,24 @@ def test_p_is_nan(self): # Issue #4571. assert_raises(ValueError, random.binomial, 1, np.nan) + def test_p_extremely_small(self): + n = 50000000000 + p = 5e-17 + sample_size = 20000000 + x = random.binomial(n, p, size=sample_size) + sample_mean = x.mean() + expected_mean = n*p + sigma = np.sqrt(n*p*(1 - p)/sample_size) + # Note: the parameters were chosen so that expected_mean - 6*sigma + # is a positive value. 
The first `assert` below validates that + # assumption (in case someone edits the parameters in the future). + # The second `assert` is the actual test. + low_bound = expected_mean - 6*sigma + assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" + test_msg = (f"sample mean {sample_mean} deviates from the expected mean " + f"{expected_mean} by more than 6*sigma") + assert abs(expected_mean - sample_mean) < 6*sigma, test_msg + class TestMultinomial: def test_basic(self): From 3f7f06fd9a22bbe1c1450dc993fdb3fd57d91dbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 17:34:59 +0000 Subject: [PATCH 277/294] MAINT: Bump actions/download-artifact from 4.3.0 to 5.0.0 Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.3.0 to 5.0.0. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/d3f86a106a0bac45b974a628896c90dbdf5c8093...634f93cb2916e3fdff6788551b99b062d0335ce0) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 2b2ab586ba18..8269a8dcd705 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -73,7 +73,7 @@ jobs: (github.event_name == 'schedule') steps: - name: Download wheel artifact(s) - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 with: path: wheelhouse/ merge-multiple: true From 8a153faba14b7e65583682bf6af7ecf12f173c70 Mon Sep 17 00:00:00 2001 From: Maryanne Wachter Date: Wed, 6 Aug 2025 10:58:42 -0700 Subject: [PATCH 278/294] CI: Add UBSAN CI jobs for macOS arm64 and Linux x86-64 (#29487) --- .github/workflows/compiler_sanitizers.yml | 51 ++++++++++++++++++++--- tools/ci/ubsan_suppressions_arm64.txt | 51 +++++++++++++++++++++++ tools/ci/ubsan_suppressions_x86_64.txt | 31 ++++++++++++++ 3 files changed, 128 insertions(+), 5 deletions(-) create mode 100644 tools/ci/ubsan_suppressions_arm64.txt create mode 100644 tools/ci/ubsan_suppressions_x86_64.txt diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index a02f6f0c1609..dc65449ba752 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -21,7 +21,7 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - clang_ASAN: + clang_ASAN_UBSAN: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: macos-latest @@ -64,12 +64,14 @@ jobs: pip uninstall -y pytest-xdist - name: Build run: - python -m spin build -j2 -- -Db_sanitize=address + python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false - name: Test run: | # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + # 
Ignore test_casting_floatingpoint_errors on macOS for now - causes crash inside UBSAN ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ - python -m spin test -- -v -s --timeout=600 --durations=10 + UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_arm64.txt \ + python -m spin test -- -k "not test_casting_floatingpoint_errors" -v -s --timeout=600 --durations=10 clang_TSAN: # To enable this workflow on a fork, comment out: @@ -78,9 +80,9 @@ jobs: container: image: ghcr.io/nascheme/numpy-tsan:3.14t options: --shm-size=2g # increase memory for large matrix ops - + steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy @@ -98,3 +100,42 @@ jobs: python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 + + ubuntu_UBSAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: pyenv --version + - name: Build python with address sanitizer + run: | + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t + pyenv global 3.14t + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r 
requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + - name: Build numpy with UndefinedBehaviorSanitizer + run: python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false + - name: Test + run: | + # pass -s to pytest to see UBSAN errors and warnings, otherwise pytest captures them + ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ + UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_x86_64.txt \ + spin test -- -v -s --timeout=600 --durations=10 diff --git a/tools/ci/ubsan_suppressions_arm64.txt b/tools/ci/ubsan_suppressions_arm64.txt new file mode 100644 index 000000000000..69de4a4c425f --- /dev/null +++ b/tools/ci/ubsan_suppressions_arm64.txt @@ -0,0 +1,51 @@ +# This file contains suppressions for the default (with GIL) build to prevent runtime errors +# when numpy is built with -Db_sanitize=undefined for arm64 architecture +# +# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks + +# Per this prior discussion, integer overflow is not a concern +# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181 +signed-integer-overflow:* + +# all alignment runtime errors ignored in favor of this being tracked via TypeSanitizer +# otherwise ubsan may detect system file alignment errors outside numpy +alignment:* + +# suggested fix for runtime error: use INT_MIN constant +shift-base:_core/src/umath/_rational_tests.c +# suggested fix for runtime error: check for overflow if signed +shift-base:_core/src/npymath/npy_math_internal.h + +# suggested fix for runtime error: null check before loop +pointer-overflow:_core/src/common/simd/neon/memory.h +pointer-overflow:_core/src/multiarray/datetime_busdaycal.c +pointer-overflow:_core/src/multiarray/nditer_templ.c +pointer-overflow:_core/src/multiarray/nditer_constr.c +pointer-overflow:_core/src/umath/loops_arithm_fp.dispatch.c.src 
+pointer-overflow:_core/src/umath/loops_unary.dispatch.c.src +pointer-overflow:_core/src/umath/loops_unary_complex.dispatch.c.src +pointer-overflow:_core/src/umath/loops_unary_fp_le.dispatch.c.src +pointer-overflow:_core/src/umath/string_buffer.h +pointer-overflow:linalg/umath_linalg.cpp +pointer-overflow:numpy/random/bit_generator.pyx.c + +float-cast-overflow:_core/src/multiarray/lowlevel_strided_loops.c.src + +# flagged in CI - call to function through pointer to incorrect function type +# Many functions in the modules/files listed below cause undefined behavior in CI +# general disable this check until further investigation, but keep the specific files +# as a starting point for resolving the checks later +function:_core/src/* +function:numpy/random/* +# function:_core/src/common/cblasfunc.c +# function:_core/src/common/npy_argparse.c +# function:_core/src/multiarray/number.c +# function:_core/src/multiarray/ctors.c +# function:_core/src/multiarray/convert_datatype.c +# function:_core/src/multiarray/dtype_transfer.c +# function:_core/src/multiarray/dtype_traversal.c +# function:_core/src/multiarray/getset.c +# function:_core/src/multiarray/scalarapi.c +# function:_core/src/multiarray/scalartypes.c.src +# function:_core/src/umath/* +# function:numpy/random/* diff --git a/tools/ci/ubsan_suppressions_x86_64.txt b/tools/ci/ubsan_suppressions_x86_64.txt new file mode 100644 index 000000000000..d9872a691a81 --- /dev/null +++ b/tools/ci/ubsan_suppressions_x86_64.txt @@ -0,0 +1,31 @@ +# This file contains suppressions for the default (with GIL) build to prevent runtime errors +# when numpy is built with -Db_sanitize=undefined for x86_64 architecture +# +# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks + +# Per this prior discussion, integer overflow is not a concern +# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181 +signed-integer-overflow:* + +# all alignment runtime errors ignored in favor of this being 
tracked via TypeSanitizer +# otherwise ubsan may detect system file alignment errors outside numpy +alignment:* + +# suggested fix for runtime error: replace left bit shift with LLONG_MIN constant +shift-base:_core/src/common/simd/sse/arithmetic.h +shift-base:_core/src/common/simd/avx2/arithmetic.h +# suggested fix for runtime error: use INT_MIN constant +shift-base:_core/src/umath/_rational_tests.c +# suggested fix for runtime error: check for overflow if signed +shift-base:_core/src/npymath/npy_math_internal.h + + +# suggested fix for runtime error: check that pointer is not null before calling function +nonnull-attribute:_core/src/multiarray/array_coercion.c +nonnull-attribute:_core/src/multiarray/ctors.c +nonnull-attribute:_core/src/multiarray/datetime_busdaycal.c +nonnull-attribute:_core/src/multiarray/scalarapi.c +nonnull-attribute:_core/src/multiarray/calculation.c + +# suggested fix for runtime error: null check before loop +pointer-overflow:_core/src/multiarray/nditer_templ.c From 7ffe59ff1585de25b5f4e4da92fd7efc93aca2c8 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 6 Aug 2025 17:00:15 -0400 Subject: [PATCH 279/294] STY: Add whitespace to appease ruff (rule E226). --- numpy/random/tests/test_generator_mt19937.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index c32612218fcf..a06212efce0d 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -105,17 +105,17 @@ def test_p_extremely_small(self): sample_size = 20000000 x = random.binomial(n, p, size=sample_size) sample_mean = x.mean() - expected_mean = n*p - sigma = np.sqrt(n*p*(1 - p)/sample_size) + expected_mean = n * p + sigma = np.sqrt(n * p * (1 - p) / sample_size) # Note: the parameters were chosen so that expected_mean - 6*sigma # is a positive value. 
The first `assert` below validates that # assumption (in case someone edits the parameters in the future). # The second `assert` is the actual test. - low_bound = expected_mean - 6*sigma + low_bound = expected_mean - 6 * sigma assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" test_msg = (f"sample mean {sample_mean} deviates from the expected mean " f"{expected_mean} by more than 6*sigma") - assert abs(expected_mean - sample_mean) < 6*sigma, test_msg + assert abs(expected_mean - sample_mean) < 6 * sigma, test_msg class TestMultinomial: From 0a7162cbe2bee464cf6a3ab4efc4b013f7467691 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 17:51:24 +0000 Subject: [PATCH 280/294] MAINT: Bump actions/cache from 4.2.3 to 4.2.4 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.3 to 4.2.4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/5a3ec84eff668545956fd18022155c47e93e2684...0400d5f644dc74513175e3cd8d07132dd4860809) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 4.2.4 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 1293e9c37c2f..84e221439aa6 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -108,7 +108,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -213,7 +213,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 418dc7d52fc1..8931b76c5dee 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -46,7 +46,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -70,7 +70,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From bd6507407a335486e1bee38b6c0039d98fe3115c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 17:51:37 +0000 Subject: [PATCH 281/294] MAINT: Bump github/codeql-action from 3.29.5 to 3.29.6 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.5 to 3.29.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/51f77329afa6477de8c49fc9c7046c15b9a4e79d...a4e1a019f5e24960714ff6296aee04b736cbc3cf) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 2d5fdb08a9d4..59489ea79e65 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/init@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/autobuild@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/analyze@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 079813c3aea3..e2508a09bba7 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v2.1.27 + uses: github/codeql-action/upload-sarif@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v2.1.27 with: sarif_file: results.sarif From ed46cdeb9bc788c8fdd8c6c318e9af933b7e0edb Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Thu, 7 Aug 2025 16:14:24 -0700 Subject: [PATCH 282/294] DOC:clarify build compatibility to dev depending page Review page to make distinction clearer per GH discussion Standardized the use of C-API vs C API Closes #27265 --- doc/source/dev/depending_on_numpy.rst | 33 ++++++++++++++------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index 70476a3cc1b3..fd5c13885a74 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -26,12 +26,13 @@ well known scientific Python projects, does **not** use semantic versioning. Instead, backwards incompatible API changes require deprecation warnings for at least two releases. For more details, see :ref:`NEP23`. -NumPy has both a Python API and a C API. The C API can be used directly or via -Cython, f2py, or other such tools. If your package uses the C API, then ABI -(application binary interface) stability of NumPy is important. NumPy's ABI is -forward but not backward compatible. This means: binaries compiled against a -given target version of NumPy's C API will still run correctly with newer NumPy -versions, but not with older versions. +NumPy provides both a Python API and a C-API. The C-API can be accessed +directly or through tools like Cython or f2py. If your package uses the +C-API, it's important to understand NumPy's application binary interface +(ABI) compatibility: NumPy's ABI is forward compatible but not backward +compatible. 
This means that binaries compiled against an older version of +NumPy will still work with newer versions, but binaries compiled against a +newer version will not necessarily work with older ones. Modules can also be safely built against NumPy 2.0 or later in :ref:`CPython's abi3 mode `, which allows @@ -87,16 +88,16 @@ Build-time dependency `__. -If a package either uses the NumPy C API directly or it uses some other tool +If a package either uses the NumPy C-API directly or it uses some other tool that depends on it like Cython or Pythran, NumPy is a *build-time* dependency of the package. -By default, NumPy will expose an API that is backwards compatible with the -oldest NumPy version that supports the currently oldest compatible Python -version. NumPy 1.25.0 supports Python 3.9 and higher and NumPy 1.19 is the -first version to support Python 3.9. Thus, we guarantee that, when using -defaults, NumPy 1.25 will expose a C-API compatible with NumPy 1.19. -(the exact version is set within NumPy-internal header files). +By default, NumPy exposes a API that is backward compatible with the earliest +NumPy version that supports the oldest Python version currently supported by +NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the +earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee +NumPy 1.25 will, when using defaults, expose a C API compatible with NumPy +1.19. (the exact version is set within NumPy-internal header files). NumPy is also forward compatible for all minor releases, but a major release will require recompilation (see NumPy 2.0-specific advice further down). @@ -157,7 +158,7 @@ frequently, (b) use a large part of NumPy's API surface, and (c) is worried that changes in NumPy may break your code, you can set an upper bound of ``=2.0`` (or go. We'll focus on the "keep compatibility with 1.xx and 2.x" now, which is a little more involved. -*Example for a package using the NumPy C API (via C/Cython/etc.) 
which wants to support +*Example for a package using the NumPy C-API (via C/Cython/etc.) which wants to support NumPy 1.23.5 and up*: .. code:: ini From 4640d9317f76dcef11f1d9f92d35218ec3b32fd7 Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Thu, 7 Aug 2025 16:45:46 -0700 Subject: [PATCH 283/294] Update depending_on_numpy.rst --- doc/source/dev/depending_on_numpy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index fd5c13885a74..e3c03b0fea65 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -96,7 +96,7 @@ By default, NumPy exposes a API that is backward compatible with the earliest NumPy version that supports the oldest Python version currently supported by NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee -NumPy 1.25 will, when using defaults, expose a C API compatible with NumPy +NumPy 1.25 will, when using defaults, expose a C-API compatible with NumPy 1.19. (the exact version is set within NumPy-internal header files). 
NumPy is also forward compatible for all minor releases, but a major release From a0515ad027cc61100987bbd8c783d1c121fbd803 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Fri, 8 Aug 2025 17:57:02 +0300 Subject: [PATCH 284/294] ENH: Use array indexing preparation routines for flatiter objects (#28590) * [ENH] Use array indexing preparation routines for flatiter objects * Fix assign subscript and add tests for it * Fix regression test * Remove unnecessary dims array * Add release note * Remove unnecessary branch from iter_subscript * Add more benchmarks * Add special cases to fix performance regression * Address some feedback - Add test with int16 index - Cast integer index array to intp before using it - Remove unused functions - Change error message back to array - Move docstring to prepare_index_noarray * Address more feedback * Address more feedback; raise error on non-array index * Fix linting errors * Update changelog item * Fix tests * Fix typing tests * Address review feedback * Fix linter errors * Address feedback * Remove wrong comments * Fix linting errors * Fix test_deprecations * Apply suggestions from code review * Update numpy/_core/tests/test_indexing.py --------- Co-authored-by: Sebastian Berg --- benchmarks/benchmarks/bench_indexing.py | 19 + .../upcoming_changes/28590.improvement.rst | 33 ++ numpy/_core/src/multiarray/iterators.c | 524 +++++++----------- numpy/_core/src/multiarray/mapping.c | 133 +++-- numpy/_core/src/multiarray/mapping.h | 22 + numpy/_core/tests/test_deprecations.py | 36 ++ numpy/_core/tests/test_indexing.py | 245 +++++++- numpy/_core/tests/test_regression.py | 4 +- numpy/lib/_shape_base_impl.py | 13 +- numpy/matrixlib/tests/test_masked_matrix.py | 2 +- numpy/typing/tests/data/fail/flatiter.pyi | 2 + numpy/typing/tests/data/pass/flatiter.py | 1 - 12 files changed, 624 insertions(+), 410 deletions(-) create mode 100644 doc/release/upcoming_changes/28590.improvement.rst diff --git a/benchmarks/benchmarks/bench_indexing.py 
b/benchmarks/benchmarks/bench_indexing.py index 6ac124cac88d..81c812e81e19 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -134,6 +134,7 @@ def setup(self): self.m_half = np.copy(self.m_all) self.m_half[::2] = False self.m_none = np.repeat(False, 200 * 50000) + self.m_index_2d = np.arange(200 * 50000).reshape((100, 100000)) def time_flat_bool_index_none(self): self.a.flat[self.m_none] @@ -143,3 +144,21 @@ def time_flat_bool_index_half(self): def time_flat_bool_index_all(self): self.a.flat[self.m_all] + + def time_flat_fancy_index_2d(self): + self.a.flat[self.m_index_2d] + + def time_flat_empty_tuple_index(self): + self.a.flat[()] + + def time_flat_ellipsis_index(self): + self.a.flat[...] + + def time_flat_bool_index_0d(self): + self.a.flat[True] + + def time_flat_int_index(self): + self.a.flat[1_000_000] + + def time_flat_slice_index(self): + self.a.flat[1_000_000:2_000_000] diff --git a/doc/release/upcoming_changes/28590.improvement.rst b/doc/release/upcoming_changes/28590.improvement.rst new file mode 100644 index 000000000000..35f5cb3c2ad2 --- /dev/null +++ b/doc/release/upcoming_changes/28590.improvement.rst @@ -0,0 +1,33 @@ +Fix ``flatiter`` indexing edge cases +------------------------------------ + +The ``flatiter`` object now shares the same index preparation logic as +``ndarray``, ensuring consistent behavior and fixing several issues where +invalid indices were previously accepted or misinterpreted. + +Key fixes and improvements: + +* Stricter index validation + + - Boolean non-array indices like ``arr.flat[[True, True]]`` were + incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. + They now raise an index error. Note that indices that match the + iterator's shape are expected to not raise in the future and be + handled as regular boolean indices. Use ``np.asarray()`` if + you want to match that behavior. 
+ - Float non-array indices were also cast to integer and incorrectly + treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now + deprecated and will be removed in a future version. + - 0-dimensional boolean indices like ``arr.flat[True]`` are also + deprecated and will be removed in a future version. + +* Consistent error types: + + Certain invalid `flatiter` indices that previously raised `ValueError` + now correctly raise `IndexError`, aligning with `ndarray` behavior. + +* Improved error messages: + + The error message for unsupported index operations now provides more + specific details, including explicitly listing the valid index types, + instead of the generic ``IndexError: unsupported index operation``. diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 7557662ad56c..704fd4738589 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -29,78 +29,6 @@ #define ELLIPSIS_INDEX -2 #define SINGLE_INDEX -3 -/* - * Tries to convert 'o' into an npy_intp interpreted as an - * index. Returns 1 if it was successful, 0 otherwise. Does - * not set an exception. - */ -static int -coerce_index(PyObject *o, npy_intp *v) -{ - *v = PyArray_PyIntAsIntp(o); - - if ((*v) == -1 && PyErr_Occurred()) { - PyErr_Clear(); - return 0; - } - return 1; -} - -/* - * This function converts one element of the indexing tuple - * into a step size and a number of steps, returning the - * starting index. Non-slices are signalled in 'n_steps', - * as NEWAXIS_INDEX, ELLIPSIS_INDEX, or SINGLE_INDEX. 
- */ -NPY_NO_EXPORT npy_intp -parse_index_entry(PyObject *op, npy_intp *step_size, - npy_intp *n_steps, npy_intp max, - int axis, int check_index) -{ - npy_intp i; - - if (op == Py_None) { - *n_steps = NEWAXIS_INDEX; - i = 0; - } - else if (op == Py_Ellipsis) { - *n_steps = ELLIPSIS_INDEX; - i = 0; - } - else if (PySlice_Check(op)) { - npy_intp stop; - if (PySlice_GetIndicesEx(op, max, &i, &stop, step_size, n_steps) < 0) { - goto fail; - } - if (*n_steps <= 0) { - *n_steps = 0; - *step_size = 1; - i = 0; - } - } - else if (coerce_index(op, &i)) { - *n_steps = SINGLE_INDEX; - *step_size = 0; - if (check_index) { - if (check_and_adjust_index(&i, max, axis, NULL) < 0) { - goto fail; - } - } - } - else { - PyErr_SetString(PyExc_IndexError, - "each index entry must be either a " - "slice, an integer, Ellipsis, or " - "newaxis"); - goto fail; - } - return i; - - fail: - return -1; -} - - /*********************** Element-wise Array Iterator ***********************/ /* Aided by Peter J. Verveer's nd_image package and numpy's arraymap ****/ /* and Python's array iterator ***/ @@ -427,7 +355,7 @@ iter_length(PyArrayIterObject *self) } -static PyArrayObject * +static PyObject * iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, NPY_cast_info *cast_info) { @@ -484,7 +412,7 @@ iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, } PyArray_ITER_RESET(self); } - return ret; + return (PyObject *) ret; } static PyObject * @@ -562,195 +490,154 @@ iter_subscript_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT PyObject * iter_subscript(PyArrayIterObject *self, PyObject *ind) { - PyArray_Descr *indtype = NULL; - PyArray_Descr *dtype; - npy_intp start, step_size; - npy_intp n_steps; - PyArrayObject *ret; - char *dptr; - int size; - PyObject *obj = NULL; - PyObject *new; + PyObject *ret = NULL; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + 
PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; NPY_cast_info cast_info = {.func = NULL}; - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - obj = iter_subscript(self, ind); - Py_DECREF(ind); - return obj; + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + return NULL; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { - goto fail; - } - if (len == 0) { - Py_INCREF(self->ao); - return (PyObject *)self->ao; - } - ind = PyTuple_GET_ITEM(ind, 0); + + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - /* - * Tuples >1d not accepted --- i.e. no newaxis - * Could implement this with adjusted strides and dimensions in iterator - * Check for Boolean -- this is first because Bool is a subclass of Int - */ - PyArray_ITER_RESET(self); + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { + goto finish; + } - if (PyBool_Check(ind)) { - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - goto fail; + ret = iter_subscript(self, ind); + Py_DECREF(ind); + goto finish; + } + + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. 
(Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - return PyArray_ToScalar(self->dataptr, self->ao); + if (indices[0].value) { + ret = PyArray_ToScalar(self->dataptr, self->ao); + goto finish; } else { /* empty array */ npy_intp ii = 0; - dtype = PyArray_DESCR(self->ao); Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), - dtype, - 1, &ii, - NULL, NULL, 0, - (PyObject *)self->ao); - return (PyObject *)ret; + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), + dtype, + 1, &ii, + NULL, NULL, 0, + (PyObject *)self->ao); + goto finish; } } - dtype = PyArray_DESCR(self->ao); - size = dtype->elsize; + PyArray_ITER_RESET(self); - /* set up a cast to handle item copying */ + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { + goto finish; + } + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_ToScalar(self->dataptr, self->ao); + PyArray_ITER_RESET(self); + goto finish; + } + /* set up a cast to handle item copying */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; + /* We can assume the newly allocated output array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, size, size, dtype, dtype, 0, &cast_info, + is_aligned, dtype_size, dtype_size, dtype, dtype, 0, &cast_info, &transfer_flags) < 0) { - goto fail; + goto finish; } - /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, - self->size, 0, 1); - if (start == -1) { - goto fail; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto fail; + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { + goto finish; } + PyArray_ITER_GOTO1D(self, start); - if (n_steps == SINGLE_INDEX) { /* 
Integer */ - PyObject *tmp; - tmp = PyArray_ToScalar(self->dataptr, self->ao); - PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return tmp; - } Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), dtype, 1, &n_steps, NULL, NULL, 0, (PyObject *)self->ao); if (ret == NULL) { - goto fail; + goto finish; } - dptr = PyArray_DATA(ret); + + char *dptr = PyArray_DATA((PyArrayObject *) ret); while (n_steps--) { char *args[2] = {self->dataptr, dptr}; - npy_intp transfer_strides[2] = {size, size}; + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { - goto fail; + goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); - dptr += size; + dptr += dtype_size; } PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; + goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - if (obj == NULL) { - goto fail; - } - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_subscript_Bool(self, (PyArrayObject *) indices[0].object, &cast_info); + goto finish; } - if (!PyArray_Check(obj)) { - PyArrayObject *tmp_arr = (PyArrayObject *) PyArray_FROM_O(obj); - if (tmp_arr == NULL) { - goto fail; - } - - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); - Py_SETREF(obj, PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST)); - Py_DECREF(tmp_arr); - if (obj == NULL) { - goto fail; - } - } - else { - Py_SETREF(obj, (PyObject *) tmp_arr); + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject 
*cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } - } - - /* Check for Boolean array */ - if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) { - ret = iter_subscript_Bool(self, (PyArrayObject *)obj, &cast_info); + ret = iter_subscript_int(self, cast_array, &cast_info); + Py_DECREF(cast_array); goto finish; } - /* Only integer arrays left */ - if (!PyArray_ISINTEGER((PyArrayObject *)obj)) { - goto fail; - } - - Py_INCREF(indtype); - new = PyArray_FromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL); - if (new == NULL) { - goto fail; - } - ret = (PyArrayObject *)iter_subscript_int(self, (PyArrayObject *)new, - &cast_info); - Py_DECREF(new); - - finish: - Py_DECREF(indtype); - Py_DECREF(obj); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; - - fail: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); } - Py_XDECREF(indtype); - Py_XDECREF(obj); - NPY_cast_info_xfree(&cast_info); - - return NULL; - + return ret; } @@ -858,140 +745,132 @@ iter_ass_sub_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT int iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) { - PyArrayObject *arrval = NULL; - PyArrayIterObject *val_it = NULL; - PyArray_Descr *type; - PyArray_Descr *indtype = NULL; - int retval = -1; - npy_intp start, step_size; - npy_intp n_steps; - PyObject *obj = NULL; - NPY_cast_info cast_info = {.func = NULL}; - if (val == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete iterator elements"); return -1; } - if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) + if (PyArray_FailUnlessWriteable(self->ao, 
"underlying array") < 0) { return -1; + } - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - retval = iter_ass_subscript(self, ind, val); - Py_DECREF(ind); - return retval; + int ret = -1; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; + NPY_cast_info cast_info = {.func = NULL}; + + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + goto finish; + } + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + if (PyTuple_Check(ind)) { + PyErr_SetString(PyExc_IndexError, "Assigning to a flat iterator with a 0-D index is not supported"); + goto finish; + } + + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { goto finish; } - ind = PyTuple_GET_ITEM(ind, 0); - } - type = PyArray_DESCR(self->ao); + ret = iter_ass_subscript(self, ind, val); + Py_DECREF(ind); + goto finish; + } - /* - * Check for Boolean -- this is first because - * Bool is a subclass of Int - */ - - if (PyBool_Check(ind)) { - retval = 0; - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - return -1; + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. 
(Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - retval = PyArray_Pack( - PyArray_DESCR(self->ao), self->dataptr, val); + ret = 0; + if (indices[0].value) { + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); } goto finish; } - if (PySequence_Check(ind) || PySlice_Check(ind)) { - goto skip; - } - start = PyArray_PyIntAsIntp(ind); - if (error_converting(start)) { - PyErr_Clear(); - } - else { - if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) { + PyArray_ITER_RESET(self); + + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { goto finish; } - PyArray_ITER_GOTO1D(self, start); - retval = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); PyArray_ITER_RESET(self); - if (retval < 0) { + if (ret < 0) { PyErr_SetString(PyExc_ValueError, "Error setting single item of array."); } goto finish; } - skip: - Py_INCREF(type); - arrval = (PyArrayObject *)PyArray_FromAny(val, type, 0, 0, + Py_INCREF(dtype); + PyArrayObject *arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); if (arrval == NULL) { - return -1; + goto finish; } - val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); + PyArrayIterObject *val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); if (val_it == NULL) { goto finish; } if (val_it->size == 0) { - retval = 0; + ret = 0; goto finish; } /* set up cast to handle single-element copies into arrval */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; - int itemsize = type->elsize; /* We can assume the newly allocated array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, itemsize, itemsize, type, type, 0, + is_aligned, dtype_size, dtype_size, dtype, dtype, 0, &cast_info, &transfer_flags) < 0) { goto finish; } - /* 
Check Slice */ - if (PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, self->size, 0, 0); - if (start == -1) { - goto finish; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { goto finish; } + PyArray_ITER_GOTO1D(self, start); - npy_intp transfer_strides[2] = {itemsize, itemsize}; - if (n_steps == SINGLE_INDEX) { - char *args[2] = {PyArray_DATA(arrval), self->dataptr}; - if (cast_info.func(&cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { - goto finish; - } - PyArray_ITER_RESET(self); - retval = 0; - goto finish; - } + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; while (n_steps--) { char *args[2] = {val_it->dataptr, self->dataptr}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); PyArray_ITER_NEXT(val_it); if (val_it->index == val_it->size) { @@ -999,60 +878,37 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) } } PyArray_ITER_RESET(self); - retval = 0; + ret = 0; goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_ass_sub_Bool(self, (PyArrayObject *) indices[0].object, val_it, &cast_info); + goto finish; } - if (obj != NULL && PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE((PyArrayObject *)obj)==NPY_BOOL) { - if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - 
retval=0; - } - /* Check for integer array */ - else if (PyArray_ISINTEGER((PyArrayObject *)obj)) { - PyObject *new; - Py_INCREF(indtype); - new = PyArray_CheckFromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_BEHAVED_NS, NULL); - Py_DECREF(obj); - obj = new; - if (new == NULL) { - goto finish; - } - if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - retval = 0; + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject *cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } + ret = iter_ass_sub_int(self, cast_array, val_it, &cast_info); + Py_DECREF(cast_array); + goto finish; } - finish: - if (!PyErr_Occurred() && retval < 0) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); - } - Py_XDECREF(indtype); - Py_XDECREF(obj); - Py_XDECREF(val_it); - Py_XDECREF(arrval); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return retval; - + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); + } + return ret; } diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 12f25534f1d0..28d6a5a26938 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -33,23 +33,6 @@ #include "umathmodule.h" -#define HAS_INTEGER 1 -#define HAS_NEWAXIS 2 -#define HAS_SLICE 4 -#define HAS_ELLIPSIS 8 -/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ -#define HAS_FANCY 16 -#define HAS_BOOL 32 -/* NOTE: Only set if it is neither fancy nor purely integer index! */ -#define HAS_SCALAR_ARRAY 64 -/* - * Indicate that this is a fancy index that comes from a 0d boolean. 
- * This means that the index does not operate along a real axis. The - * corresponding index type is just HAS_FANCY. - */ -#define HAS_0D_BOOL (HAS_FANCY | 128) - - static int _nonzero_indices(PyObject *myBool, PyArrayObject **arrays); @@ -263,20 +246,22 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n) * * Checks everything but the bounds. * - * @param self the array being indexed + * @param array_ndims The number of dimensions of the array being indexed (1 for iterators) + * @param array_dims The dimensions of the array being indexed (self->size for iterators) * @param index the index object * @param indices index info struct being filled (size of NPY_MAXDIMS * 2 + 1) * @param num number of indices found * @param ndim dimension of the indexing result * @param out_fancy_ndim dimension of the fancy/advanced indices part * @param allow_boolean whether to allow the boolean special case + * @param is_flatiter_object Whether the object indexed is an iterator * * @returns the index_type or -1 on failure and fills the number of indices. */ NPY_NO_EXPORT int -prepare_index(PyArrayObject *self, PyObject *index, - npy_index_info *indices, - int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object) { int new_ndim, fancy_ndim, used_ndim, index_ndim; int curr_idx, get_idx; @@ -314,8 +299,8 @@ prepare_index(PyArrayObject *self, PyObject *index, while (get_idx < index_ndim) { if (curr_idx > NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? 
"flat iterator" : "array"); goto failed_building_indices; } @@ -377,7 +362,7 @@ prepare_index(PyArrayObject *self, PyObject *index, * Since this is always an error if it was not a boolean, we can * allow the 0-d special case before the rest. */ - else if (PyArray_NDIM(self) != 0) { + else if (array_ndims != 0) { /* * Single integer index, there are two cases here. * It could be an array, a 0-d array is handled @@ -418,17 +403,55 @@ prepare_index(PyArrayObject *self, PyObject *index, goto failed_building_indices; } + // We raise here because we changed the behavior for boolean + // indices for flat iterators from being handled as integers + // to being regular boolean indices. + // TODO: This should go away fairly soon and lists of booleans + // should be handled as regular boolean indices. + if (is_flatiter_object && PyArray_ISBOOL(tmp_arr) && !PyBool_Check(index)) { + Py_DECREF(tmp_arr); + PyErr_Format(PyExc_IndexError, + "boolean indices for iterators are not supported because " + "of previous behavior that was confusing (valid boolean " + "indices are expected to work in the future)" + ); + goto failed_building_indices; + } + /* * For example an empty list can be cast to an integer array, * however it will default to a float one. */ - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + if (PyArray_SIZE(tmp_arr) == 0 + || (is_flatiter_object && !PyArray_ISINTEGER(tmp_arr) && !PyArray_ISBOOL(tmp_arr))) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); arr = (PyArrayObject *)PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST); + + // If the cast succeeded (which means that the previous flat iterator + // indexing routine would have succeeded as well), we need to issue a + // deprecation warning. 
+ if (arr + && is_flatiter_object + && PyArray_SIZE(tmp_arr) != 0 + && !PyArray_ISINTEGER(tmp_arr) + && !PyArray_ISBOOL(tmp_arr) + && DEPRECATE("Invalid non-array indices for iterator objects are deprecated and will be " + "removed in a future version. (Deprecated NumPy 2.4)") < 0) { + Py_DECREF(tmp_arr); + goto failed_building_indices; + } Py_DECREF(tmp_arr); if (arr == NULL) { + // Raise a helpful error if this was a ValueError (i.e. could not cast) + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); + } goto failed_building_indices; } } @@ -458,9 +481,9 @@ prepare_index(PyArrayObject *self, PyObject *index, * this is always an error. The check ensures that these errors are raised * and match those of the generic path. */ - if ((PyArray_NDIM(arr) == PyArray_NDIM(self)) + if ((PyArray_NDIM(arr) == array_ndims) && PyArray_CompareLists(PyArray_DIMS(arr), - PyArray_DIMS(self), + array_dims, PyArray_NDIM(arr))) { index_type = HAS_BOOL; @@ -468,8 +491,8 @@ prepare_index(PyArrayObject *self, PyObject *index, indices[curr_idx].object = (PyObject *)arr; /* keep track anyway, just to be complete */ - used_ndim = PyArray_NDIM(self); - fancy_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; + fancy_ndim = array_ndims; curr_idx += 1; break; } @@ -524,8 +547,8 @@ prepare_index(PyArrayObject *self, PyObject *index, /* Check that we will not run out of indices to store new ones */ if (curr_idx + n >= NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? 
"flat iterator" : "array"); for (i=0; i < n; i++) { Py_DECREF(nonzero_result[i]); } @@ -603,10 +626,11 @@ prepare_index(PyArrayObject *self, PyObject *index, } else { /* The input was not an array, so give a general error message */ - PyErr_SetString(PyExc_IndexError, - "only integers, slices (`:`), ellipsis (`...`), " - "numpy.newaxis (`None`) and integer or boolean " - "arrays are valid indices"); + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); } Py_DECREF(arr); goto failed_building_indices; @@ -616,10 +640,10 @@ prepare_index(PyArrayObject *self, PyObject *index, * Compare dimension of the index to the real ndim. this is * to find the ellipsis value or append an ellipsis if necessary. */ - if (used_ndim < PyArray_NDIM(self)) { + if (used_ndim < array_ndims) { if (index_type & HAS_ELLIPSIS) { - indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim; - used_ndim = PyArray_NDIM(self); + indices[ellipsis_pos].value = array_ndims - used_ndim; + used_ndim = array_ndims; new_ndim += indices[ellipsis_pos].value; } else { @@ -630,19 +654,21 @@ prepare_index(PyArrayObject *self, PyObject *index, index_type |= HAS_ELLIPSIS; indices[curr_idx].object = NULL; indices[curr_idx].type = HAS_ELLIPSIS; - indices[curr_idx].value = PyArray_NDIM(self) - used_ndim; + indices[curr_idx].value = array_ndims - used_ndim; ellipsis_pos = curr_idx; - used_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; new_ndim += indices[curr_idx].value; curr_idx += 1; } } - else if (used_ndim > PyArray_NDIM(self)) { + else if (used_ndim > array_ndims) { PyErr_Format(PyExc_IndexError, - "too many indices for array: " - "array is %d-dimensional, but %d were indexed", - PyArray_NDIM(self), + "too many indices for %s: " + "%s is %d-dimensional, but %d were indexed", + is_flatiter_object ? "flat iterator" : "array", + is_flatiter_object ? 
"flat iterator" : "array", + array_ndims, used_ndim); goto failed_building_indices; } @@ -697,14 +723,15 @@ prepare_index(PyArrayObject *self, PyObject *index, used_ndim = 0; for (i = 0; i < curr_idx; i++) { if ((indices[i].type == HAS_FANCY) && indices[i].value > 0) { - if (indices[i].value != PyArray_DIM(self, used_ndim)) { + if (indices[i].value != array_dims[used_ndim]) { char err_msg[174]; PyOS_snprintf(err_msg, sizeof(err_msg), - "boolean index did not match indexed array along " + "boolean index did not match indexed %s along " "axis %d; size of axis is %" NPY_INTP_FMT " but size of corresponding boolean axis is %" NPY_INTP_FMT, - used_ndim, PyArray_DIM(self, used_ndim), + is_flatiter_object ? "flat iterator" : "array", + used_ndim, array_dims[used_ndim], indices[i].value); PyErr_SetString(PyExc_IndexError, err_msg); goto failed_building_indices; @@ -740,6 +767,16 @@ prepare_index(PyArrayObject *self, PyObject *index, return -1; } +NPY_NO_EXPORT int +prepare_index(PyArrayObject *self, PyObject *index, + npy_index_info *indices, + int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +{ + return prepare_index_noarray(PyArray_NDIM(self), PyArray_DIMS(self), + index, indices, num, ndim, out_fancy_ndim, + allow_boolean, 0); +} + /** * Check if self has memory overlap with one of the index arrays, or with extra_op. diff --git a/numpy/_core/src/multiarray/mapping.h b/numpy/_core/src/multiarray/mapping.h index 528cb6604892..d4577c78fdbb 100644 --- a/numpy/_core/src/multiarray/mapping.h +++ b/numpy/_core/src/multiarray/mapping.h @@ -3,6 +3,23 @@ extern NPY_NO_EXPORT PyMappingMethods array_as_mapping; +/* Indexing types */ +#define HAS_INTEGER 1 +#define HAS_NEWAXIS 2 +#define HAS_SLICE 4 +#define HAS_ELLIPSIS 8 +/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ +#define HAS_FANCY 16 +#define HAS_BOOL 32 +/* NOTE: Only set if it is neither fancy nor purely integer index! 
*/ +#define HAS_SCALAR_ARRAY 64 +/* + * Indicate that this is a fancy index that comes from a 0d boolean. + * This means that the index does not operate along a real axis. The + * corresponding index type is just HAS_FANCY. + */ +#define HAS_0D_BOOL (HAS_FANCY | 128) + /* * Object to store information needed for advanced (also fancy) indexing. @@ -113,6 +130,11 @@ typedef struct { } npy_index_info; +NPY_NO_EXPORT int +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object); + NPY_NO_EXPORT Py_ssize_t array_length(PyArrayObject *self); diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index d4f2e9984266..75f092a51808 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -480,3 +480,39 @@ def test_deprecated(self): def test_not_deprecated(self, align): # if the user passes a bool, it is accepted. 
self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + + +class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + message = r"Indexing flat iterators with a 0-dimensional boolean index" + + def test_0d_boolean_index_deprecated(self): + arr = np.arange(3) + # 0d boolean indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[True]) + + def test_0d_boolean_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[True] = 10 + + self.assert_deprecated(assign_to_index) + + +class TestFlatiterIndexingFloatIndex(_DeprecationTestCase): + # Deprecated in NumPy 2.4, 2025-07 + message = r"Invalid non-array indices for iterator objects" + + def test_float_index_deprecated(self): + arr = np.arange(3) + # float indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[[1.]]) + + def test_float_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[[1.]] = 10 + + self.assert_deprecated(assign_to_index) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 2a8a669d0787..c65468ebd24a 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -614,22 +614,6 @@ def test_nontuple_ndindex(self): assert_equal(a[[0, 1], [0, 1]], np.array([0, 6])) assert_raises(IndexError, a.__getitem__, [slice(None)]) - def test_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array([0, 5, 6]) - assert_equal(a.flat[b.flat], np.array([0, 5, 6])) - - def test_empty_string_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array([], dtype="S") - assert_equal(a.flat[b.flat], np.array([])) - - def test_nonempty_string_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array(["a"], dtype="S") - with pytest.raises(IndexError, match="unsupported iterator index"): - a.flat[b.flat] - class 
TestFieldIndexing: def test_scalar_return_type(self): @@ -1453,3 +1437,232 @@ def test_setitem(self): a = a.reshape(5, 2) assign(a, 4, 10) assert_array_equal(a[-1], [10, 10]) + + +class TestFlatiterIndexing: + def test_flatiter_indexing_single_integer(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[0], 0) + assert_array_equal(a.flat[4], 4) + assert_array_equal(a.flat[-1], 8) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] + + def test_flatiter_indexing_slice(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[:], np.arange(9)) + assert_array_equal(a.flat[:5], np.arange(5)) + assert_array_equal(a.flat[5:10], np.arange(5, 9)) + assert_array_equal(a.flat[::2], np.arange(0, 9, 2)) + assert_array_equal(a.flat[::-1], np.arange(8, -1, -1)) + assert_array_equal(a.flat[10:5], np.array([])) + + assert_array_equal(a.flat[()], np.arange(9)) + assert_array_equal(a.flat[...], np.arange(9)) + + def test_flatiter_indexing_boolean(self): + a = np.arange(9).reshape((3, 3)) + + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[True], 0) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[False], np.array([])) + + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + assert_array_equal(a.flat[mask], np.arange(0, 9, 2)) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] + + def test_flatiter_indexing_fancy(self): + a = np.arange(9).reshape((3, 3)) + + indices = np.array([1, 3, 5]) + assert_array_equal(a.flat[indices], indices) + + assert_array_equal(a.flat[[-1, -2]], np.array([8, 7])) + + indices_2d = np.array([[1, 2], [3, 4]]) + assert_array_equal(a.flat[indices_2d], indices_2d) + + assert_array_equal(a.flat[[True, 1]], np.array([1, 1])) + + assert_array_equal(a.flat[[]], 
np.array([], dtype=a.dtype)) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] + assert_array_equal(a.flat[np.asarray([True, False, True])], np.array([0, 2])) + + def test_flatiter_indexing_not_supported_newaxis_mutlidimensional_float(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] + + with pytest.raises(IndexError, + match=r"too many indices for flat iterator: flat iterator " + r"is 1-dimensional, but 2 were indexed"): + a.flat[1, 2] + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + assert_array_equal(a.flat[[1.0, 2.0]], np.array([1, 2])) + + def test_flatiter_assign_single_integer(self): + a = np.arange(9).reshape((3, 3)) + + a.flat[0] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a.flat[4] = 20 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 8]])) + + a.flat[-1] = 30 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 30]])) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] = 40 + + def test_flatiter_indexing_slice_assign(self): + a = np.arange(9).reshape((3, 3)) + a.flat[:] = 10 + assert_array_equal(a, np.full((3, 3), 10)) + + a = np.arange(9).reshape((3, 3)) + a.flat[:5] = 20 + assert_array_equal(a, np.array([[20, 20, 20], [20, 20, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[5:10] = 30 + assert_array_equal(a, np.array([[0, 1, 2], [3, 4, 30], [30, 30, 30]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::2] = 40 + assert_array_equal(a, np.array([[40, 1, 40], [3, 40, 5], [40, 7, 40]])) + + a = 
np.arange(9).reshape((3, 3)) + a.flat[::-1] = 50 + assert_array_equal(a, np.full((3, 3), 50)) + + a = np.arange(9).reshape((3, 3)) + a.flat[10:5] = 60 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match="Assigning to a flat iterator with a 0-D index"): + a.flat[()] = 70 + + a = np.arange(9).reshape((3, 3)) + a.flat[...] = 80 + assert_array_equal(a, np.full((3, 3), 80)) + + def test_flatiter_indexing_boolean_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[True] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[False] = 20 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + a.flat[mask] = 30 + assert_array_equal(a, np.array([[30, 1, 30], [3, 30, 5], [30, 7, 30]])) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] = 40 + + def test_flatiter_indexing_fancy_assign(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5]) + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + a.flat[[-1, -2]] = 20 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 20, 20]])) + + a = np.arange(9).reshape((3, 3)) + indices_2d = np.array([[1, 2], [3, 4]]) + a.flat[indices_2d] = 30 + assert_array_equal(a, np.array([[0, 30, 30], [30, 30, 5], [6, 7, 8]])) + + a.flat[[True, 1]] = 40 + assert_array_equal(a, np.array([[0, 40, 30], [30, 30, 5], [6, 7, 8]])) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] = 50 + + a = 
np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] = 20 + a.flat[np.asarray([True, False, True])] = 20 + assert_array_equal(a, np.array([20, 1, 20])) + + def test_flatiter_indexing_fancy_int16_dtype(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5], dtype=np.int16) + assert_array_equal(a.flat[indices], np.array([1, 3, 5])) + + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + def test_flatiter_indexing_not_supported_newaxis_mutlid_float_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] = 10 + + a.flat[[1, 2]] = 10 + assert_array_equal(a, np.array([[0, 10, 10], [3, 4, 5], [6, 7, 8]])) + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + a.flat[[1.0, 2.0]] = 20 + assert_array_equal(a, np.array([[0, 20, 20], [3, 4, 5], [6, 7, 8]])) + + def test_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([0, 5, 6]) + assert_equal(a.flat[b.flat], np.array([0, 5, 6])) + + def test_empty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([], dtype="S") + # This is arguably incorrect, and should be removed (ideally with + # deprecation). But it matches the array path and comes from not + # distinguishing `arr[np.array([]).flat]` and `arr[[]]` and the latter + # must pass. 
+ assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " + r"and integer or boolean arrays are valid indices"): + a.flat[b.flat] diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 2aeb29a1320d..8ad9a26fcc9a 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -851,8 +851,8 @@ def ia(x, s, v): assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) # Old special case (different code path): - assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) - assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) + assert_raises(IndexError, ia, x.flat, s, np.zeros(9, dtype=float)) + assert_raises(IndexError, ia, x.flat, s, np.zeros(11, dtype=float)) def test_mem_scalar_indexing(self): # Ticket #603 diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index c44d603611ee..8b200cd8daa4 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1,6 +1,7 @@ import functools import warnings +import numpy as np import numpy._core.numeric as _nx from numpy._core import atleast_3d, overrides, vstack from numpy._core._multiarray_umath import _array_converter @@ -171,15 +172,13 @@ def take_along_axis(arr, indices, axis=-1): if indices.ndim != 1: raise ValueError( 'when axis=None, `indices` must have a single dimension.') - arr = arr.flat - arr_shape = (len(arr),) # flatiter has no .shape + arr = np.array(arr.flat) axis = 0 else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - return arr[_make_along_axis_idx(arr_shape, indices, axis)] + return arr[_make_along_axis_idx(arr.shape, indices, axis)] def _put_along_axis_dispatcher(arr, indices, values, axis): @@ -263,15 +262,13 @@ 
def put_along_axis(arr, indices, values, axis): if indices.ndim != 1: raise ValueError( 'when axis=None, `indices` must have a single dimension.') - arr = arr.flat + arr = np.array(arr.flat) axis = 0 - arr_shape = (len(arr),) # flatiter has no .shape else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + arr[_make_along_axis_idx(arr.shape, indices, axis)] = values def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py index e6df047ee6ca..3f9414ff7d30 100644 --- a/numpy/matrixlib/tests/test_masked_matrix.py +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -116,7 +116,7 @@ def test_flat(self): # Test setting test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) testflat = test.flat - testflat[:] = testflat[[2, 1, 0]] + testflat[:] = testflat[np.array([2, 1, 0])] assert_equal(test, control) testflat[0] = 9 # test that matrices keep the correct shape (#4615) diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi index 06e23fed9e3f..cdf2a8b78b7b 100644 --- a/numpy/typing/tests/data/fail/flatiter.pyi +++ b/numpy/typing/tests/data/fail/flatiter.pyi @@ -18,3 +18,5 @@ a.copy(order='C') # type: ignore[call-arg] a[np.bool()] # type: ignore[index] a[Index()] # type: ignore[call-overload] a[supports_array] # type: ignore[index] + +a[[0, 1, 2]] diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index e64e4261b8e7..cc7c6069a89a 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -9,7 +9,6 @@ iter(a) next(a) a[0] -a[[0, 1, 2]] a[...] 
a[:] a.__array__() From c7aae66faf994facdf74f648cd94554f11740a5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 17:34:04 +0000 Subject: [PATCH 285/294] MAINT: Bump github/codeql-action from 3.29.6 to 3.29.8 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.6 to 3.29.8. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/a4e1a019f5e24960714ff6296aee04b736cbc3cf...76621b61decf072c1cee8dd1ce2d2a82d33c17ed) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.8 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 59489ea79e65..cbf79b02142b 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index e2508a09bba7..cf637aa947a0 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v2.1.27 + uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v2.1.27 with: sarif_file: results.sarif From e11fea7c7b0567b9b0e5558d295a1cbca4877be7 Mon Sep 17 00:00:00 2001 From: Zhi Li Date: Fri, 8 Aug 2025 16:35:54 -0400 Subject: [PATCH 286/294] DOC: Add 'today' string to datetime64 documentation (#29514) --- doc/source/reference/arrays.datetime.rst | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 8dbff88c918e..2095dc3aea49 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -53,7 +53,8 @@ months ('M'), weeks ('W'), and days ('D'), while the time units are hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and some additional SI-prefix seconds-based units. The `datetime64` data type also accepts the string "NAT", in any combination of lowercase/uppercase -letters, for a "Not A Time" value. +letters, for a "Not A Time" value. The string "today" is also supported and +returns the current UTC date with day precision. .. admonition:: Example @@ -91,6 +92,11 @@ letters, for a "Not A Time" value. >>> np.datetime64('nat') np.datetime64('NaT') + The current date: + + >>> np.datetime64('today') + np.datetime64('2025-08-05') # result will depend on the current date + When creating an array of datetimes from a string, it is still possible to automatically select the unit from the inputs, by using the datetime type with generic units. 
From 0f39fd27ff149b25f74fc58d589c92b4fd881226 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Oct 2024 20:43:03 +0100 Subject: [PATCH 287/294] BLD: update licensing metadata to use PEP 639 --- LICENSES_bundled.txt | 36 ------------- meson.build | 9 ++-- .../_core/src/multiarray/dragon4_LICENSE.txt | 27 ++++++++++ pyproject.toml | 52 +++++++++++++++++-- tools/wheels/check_license.py | 2 +- tools/wheels/cibw_before_build.sh | 1 - 6 files changed, 78 insertions(+), 49 deletions(-) delete mode 100644 LICENSES_bundled.txt create mode 100644 numpy/_core/src/multiarray/dragon4_LICENSE.txt diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt deleted file mode 100644 index b3d8aa8bed06..000000000000 --- a/LICENSES_bundled.txt +++ /dev/null @@ -1,36 +0,0 @@ -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. - -Name: lapack-lite -Files: numpy/linalg/lapack_lite/* -License: BSD-3-Clause - For details, see numpy/linalg/lapack_lite/LICENSE.txt - -Name: dragon4 -Files: numpy/_core/src/multiarray/dragon4.c -License: MIT - For license text, see numpy/_core/src/multiarray/dragon4.c - -Name: libdivide -Files: numpy/_core/include/numpy/libdivide/* -License: Zlib - For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt - - -Note that the following files are vendored in the repository and sdist but not -installed in built numpy packages: - -Name: Meson -Files: vendored-meson/meson/* -License: Apache 2.0 - For license text, see vendored-meson/meson/COPYING - -Name: spin -Files: .spin/cmds.py -License: BSD-3 - For license text, see .spin/LICENSE - -Name: tempita -Files: numpy/_build_utils/tempita/* -License: MIT - For details, see numpy/_build_utils/tempita/LICENCE.txt diff --git a/meson.build b/meson.build index 0d436352cbbd..2cb7ce987ad5 100644 --- a/meson.build +++ b/meson.build @@ -1,12 +1,9 @@ project( 'NumPy', 'c', 'cpp', 'cython', - version: run_command( - # This should become 
`numpy/_version.py` in NumPy 2.0 - ['numpy/_build_utils/gitversion.py'], - check: true).stdout().strip(), - license: 'BSD-3', - meson_version: '>=1.5.2', # version in vendored-meson is 1.5.2 + version: run_command(['numpy/_build_utils/gitversion.py'], check: true).stdout().strip(), + license: 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0', + meson_version: '>=1.8.3', # version in vendored-meson default_options: [ 'buildtype=debugoptimized', 'b_ndebug=if-release', diff --git a/numpy/_core/src/multiarray/dragon4_LICENSE.txt b/numpy/_core/src/multiarray/dragon4_LICENSE.txt new file mode 100644 index 000000000000..7bd49e7074a8 --- /dev/null +++ b/numpy/_core/src/multiarray/dragon4_LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2014 Ryan Juckett + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
+ +dragon4.c|h h contains a modified version of Ryan Juckett's Dragon4 +implementation, obtained from https://www.ryanjuckett.com, +which has been ported from C++ to C and which has +modifications specific to printing floats in numpy. + +Ryan Juckett's original code was under the Zlib license; he gave numpy +permission to include it under the MIT license instead. diff --git a/pyproject.toml b/pyproject.toml index f9cfeadee599..b678be83a486 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,16 +1,13 @@ [build-system] build-backend = "mesonpy" requires = [ - "meson-python>=0.15.0", + "meson-python>=0.18.0", "Cython>=3.0.6", # keep in sync with version check in meson.build ] [project] name = "numpy" version = "2.4.0.dev0" -# TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) -license = {file = "LICENSE.txt"} - description = "Fundamental package for array computing in Python" authors = [{name = "Travis E. Oliphant et al."}] maintainers = [ @@ -22,7 +19,6 @@ classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', - 'License :: OSI Approved :: BSD License', 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', @@ -40,6 +36,52 @@ classifiers = [ 'Operating System :: Unix', 'Operating System :: MacOS', ] +# License info: +# - The main NumPy project license is BSD-3-Clause. +# - The SPDX license expression below reflects installed numpy packages when +# built from source (e.g., with `python -m build --wheel`), with no vendoring. 
+# - That SPDX expression is therefore incomplete for: +# (a) sdists - see the comment below `license-files` for other licenses +# included in the sdist +# (b) wheels on PyPI - most wheels include vendored libraries with additional licenses: +# - libopenblas : BSD-3-Clause AND BSD-3-Clause-Attribution (all except arm64 macOS>=14) +# - libgfortran : GPL-3.0-with-GCC-exception (all except arm64 macOS>=14) +# - libquadmath : LGPL-2.1-or-later (all except arm64 macOS>=14 and Windows) +# The licenses for these vendored components are dynamically included +# in the build process for PyPI wheels. +license = 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0' +license-files = [ + 'LICENSE.txt', # BSD-3-Clause + 'numpy/_core/include/numpy/libdivide/LICENSE.txt', # Zlib + 'numpy/_core/src/common/pythoncapi-compat/COPYING', # 0BSD + 'numpy/_core/src/highway/LICENSE-BSD3', # BSD-3-Clause + 'numpy/_core/src/multiarray/dragon4_LICENSE.txt', # MIT + 'numpy/_core/src/npysort/x86-simd-sort/LICENSE.md', # BSD-3-Clause + 'numpy/_core/src/umath/svml/LICENSE', # BSD-3-Clause + 'numpy/fft/pocketfft/LICENSE.md', # BSD-3-Clause + 'numpy/ma/LICENSE', # BSD-3-Clause + 'numpy/linalg/lapack_lite/LICENSE.txt', # BSD-3-Clause + 'numpy/random/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/distributions/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/mt19937/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/pcg64/LICENSE.md', # MIT + 'numpy/random/src/philox/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/sfc64/LICENSE.md', # MIT + 'numpy/random/src/splitmix64/LICENSE.md', # CC0-1.0 +] +# The license files below apply only to files in the repo and sdist, not to +# installed `numpy` packages or wheels (build/doc tools don't affect the +# license of the installed package). We have to make a choice whether to add +# those to the SPDX expression above since PEP 639 is unclear on the +# differences; we choose to make the SPDX expression reflect *a wheel built +# from the sources*. 
+# '.spin/LICENSE', # BSD-3-Clause +# 'doc/source/_static/scipy-mathjax/LICENSE', # Apache-2.0 +# 'numpy/_build_utils/tempita/LICENSE.txt', # BSD-3-Clause +# 'vendored-meson/meson/COPYING', # Apache-2.0 +# Note that the commented out license files are still included in the sdist, +# just not in Core Metadata and in the .dist-info directory. + [project.scripts] f2py = 'numpy.f2py.f2py2e:main' diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py index 572295b4ca2f..9aa50015d00b 100644 --- a/tools/wheels/check_license.py +++ b/tools/wheels/check_license.py @@ -38,7 +38,7 @@ def main(): distinfo_path = next(iter(sitepkgs.glob("numpy-*.dist-info"))) # Check license text - license_txt = distinfo_path / "LICENSE.txt" + license_txt = distinfo_path / "licenses" / "LICENSE.txt" with open(license_txt, encoding="utf-8") as f: text = f.read() diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index e41e5d37316b..ed2640471fed 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -10,7 +10,6 @@ rm -rf build echo "" >> $PROJECT_DIR/LICENSE.txt echo "----" >> $PROJECT_DIR/LICENSE.txt echo "" >> $PROJECT_DIR/LICENSE.txt -cat $PROJECT_DIR/LICENSES_bundled.txt >> $PROJECT_DIR/LICENSE.txt if [[ $RUNNER_OS == "Linux" ]] ; then cat $PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt elif [[ $RUNNER_OS == "macOS" ]]; then From 7c34da750a1f46de91ac66d34e5feed62de2d018 Mon Sep 17 00:00:00 2001 From: Maryanne Wachter Date: Mon, 11 Aug 2025 13:03:15 -0700 Subject: [PATCH 288/294] BUG: left bit shift undefined behavior (#29539) --- numpy/_core/src/common/simd/avx2/arithmetic.h | 6 +++--- numpy/_core/src/common/simd/sse/arithmetic.h | 6 +++--- tools/ci/ubsan_suppressions_x86_64.txt | 3 --- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/common/simd/avx2/arithmetic.h b/numpy/_core/src/common/simd/avx2/arithmetic.h index 58d842a6d3a4..15b9be85dc51 100644 --- 
a/numpy/_core/src/common/simd/avx2/arithmetic.h +++ b/numpy/_core/src/common/simd/avx2/arithmetic.h @@ -215,9 +215,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) // q = (a + mulhi) >> sh __m256i q = _mm256_add_epi64(a, mulhi); // emulate arithmetic right shift - const __m256i sigb = npyv_setall_s64(1LL << 63); - q = _mm256_srl_epi64(_mm256_add_epi64(q, sigb), shf1); - q = _mm256_sub_epi64(q, _mm256_srl_epi64(sigb, shf1)); + const __m256i sbit = npyv_setall_s64(0x8000000000000000); + q = _mm256_srl_epi64(_mm256_add_epi64(q, sbit), shf1); + q = _mm256_sub_epi64(q, _mm256_srl_epi64(sbit, shf1)); // q = q - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign q = _mm256_sub_epi64(q, asign); diff --git a/numpy/_core/src/common/simd/sse/arithmetic.h b/numpy/_core/src/common/simd/sse/arithmetic.h index 357b136d25cd..b50942ab75ad 100644 --- a/numpy/_core/src/common/simd/sse/arithmetic.h +++ b/numpy/_core/src/common/simd/sse/arithmetic.h @@ -251,9 +251,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) // q = (a + mulhi) >> sh __m128i q = _mm_add_epi64(a, mulhi); // emulate arithmetic right shift - const __m128i sigb = npyv_setall_s64(1LL << 63); - q = _mm_srl_epi64(_mm_add_epi64(q, sigb), divisor.val[1]); - q = _mm_sub_epi64(q, _mm_srl_epi64(sigb, divisor.val[1])); + const __m128i sbit = npyv_setall_s64(0x8000000000000000); + q = _mm_srl_epi64(_mm_add_epi64(q, sbit), divisor.val[1]); + q = _mm_sub_epi64(q, _mm_srl_epi64(sbit, divisor.val[1])); // q = q - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign q = _mm_sub_epi64(q, asign); diff --git a/tools/ci/ubsan_suppressions_x86_64.txt b/tools/ci/ubsan_suppressions_x86_64.txt index d9872a691a81..5e4316ce3715 100644 --- a/tools/ci/ubsan_suppressions_x86_64.txt +++ b/tools/ci/ubsan_suppressions_x86_64.txt @@ -11,9 +11,6 @@ signed-integer-overflow:* # otherwise ubsan may detect system file alignment errors outside numpy alignment:* -# suggested fix for runtime error: replace left bit 
shift with LLONG_MIN constant -shift-base:_core/src/common/simd/sse/arithmetic.h -shift-base:_core/src/common/simd/avx2/arithmetic.h # suggested fix for runtime error: use INT_MIN constant shift-base:_core/src/umath/_rational_tests.c # suggested fix for runtime error: check for overflow if signed From a2f142b6039413dfb43f79aee16dfa3b19c37d1f Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 11 Aug 2025 21:29:06 +0100 Subject: [PATCH 289/294] TYP: Type ``MaskedIterator`` (#29526) --- numpy/ma/core.pyi | 85 ++++++++++++++++++++++++--- numpy/typing/tests/data/pass/ma.py | 7 ++- numpy/typing/tests/data/reveal/ma.pyi | 10 +++- 3 files changed, 91 insertions(+), 11 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index e99e0a527773..55e5867247bb 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,15 +1,20 @@ # pyright: reportIncompatibleMethodOverride=false # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 +import datetime as dt from _typeshed import Incomplete -from collections.abc import Sequence +from collections.abc import Iterator, Sequence from typing import ( Any, + Generic, Literal, Never, NoReturn, Self, + SupportsComplex, + SupportsFloat, SupportsIndex, + SupportsInt, TypeAlias, overload, ) @@ -37,6 +42,7 @@ from numpy import ( dtype, dtypes, expand_dims, + flexible, float16, float32, float64, @@ -82,9 +88,11 @@ from numpy._typing import ( _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _CharLike_co, _DTypeLike, _DTypeLikeBool, _IntLike_co, + _NestedSequence, _ScalarLike_co, _Shape, _ShapeLike, @@ -296,6 +304,12 @@ _MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] _ArrayInt_co: TypeAlias = NDArray[integer | bool_] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | 
_CharLike_co +_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None + MaskType = bool_ nomask: bool_[Literal[False]] @@ -447,15 +461,68 @@ masked_print_option: _MaskedPrintOption def flatten_structured_array(a): ... -class MaskedIterator: - ma: Any +class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): + ma: MaskedArray[_ShapeT_co, _DTypeT_co] dataiter: Any maskiter: Any - def __init__(self, ma): ... - def __iter__(self): ... - def __getitem__(self, indx): ... - def __setitem__(self, index, value): ... - def __next__(self): ... + def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + + # Similar to `MaskedArray.__getitem__` but without the `void` case. + @overload + def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # Similar to `ndarray.__setitem__` but without the `void` case. + @overload # flexible | object_ | bool + def __setitem__( + self: MaskedIterator[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + index: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: MaskedIterator[Any, dtype[integer]], + index: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... 
+ @overload # floating + def __setitem__( + self: MaskedIterator[Any, dtype[floating]], + index: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: MaskedIterator[Any, dtype[complexfloating]], + index: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: MaskedIterator[Any, dtype[timedelta64]], + index: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: MaskedIterator[Any, dtype[datetime64]], + index: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # catch-all + def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... + + def __next__(self) -> Any: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Any @@ -607,7 +674,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def baseclass(self) -> type[NDArray[Any]]: ... data: Any @property - def flat(self) -> MaskedIterator: ... + def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... @flat.setter def flat(self, value: ArrayLike, /) -> None: ... 
@property diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 2e2246fb88d2..3ccea66861eb 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -42,7 +42,12 @@ MAR_i.fill_value = 0 -MAR_f.flat = [9] +MAR_b.flat[MAR_i > 0] = False +MAR_i.flat[:] = 1 +MAR_f.flat[[0]] = AR_LIKE_f +MAR_c.flat[[0, 0]] = [3, 4 + 3j] +MAR_td64.flat[0] = dt.timedelta(1) +MAR_dt64.flat[0] = dt.datetime(2020, 1, 1) MAR_b[MAR_i > 0] = False MAR_i[:] = 1 diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index a09fb2d75997..bd4e4e98373a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -462,7 +462,15 @@ assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) -assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) +assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat.ma, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[0, 0], Any) +assert_type(MAR_2d_f4.flat[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[..., -1], MaskedArray[np.float32]) def invalid_resize() -> None: assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] From dbae155b78e2dd47dbf3697d40732ddc75bac1ea Mon Sep 17 00:00:00 2001 From: MyUserNameWasTakenLinux Date: Mon, 11 Aug 2025 15:36:40 -0600 Subject: [PATCH 290/294] STY: fix typo in dtypemeta.c [skip azp][skip actions] --- numpy/_core/src/multiarray/dtypemeta.c | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 5ac7ef3b2320..293d7dfee1b3 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1398,7 +1398,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; /*NUMPY_API - * Fetch the ArrFuncs struct which new lives on the DType and not the + * Fetch the ArrFuncs struct which now lives on the DType and not the * descriptor. Use of this struct should be avoided but remains necessary * for certain functionality. * From 05dbc23b5245b4f0e6fec7ef2329cffb85d3c62e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 01:44:01 +0000 Subject: [PATCH 291/294] MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/11bd71901bbe5b1630ceea73d27597364c9af683...08c6903cd8c0fde910a37f88322edcfb5dd907a8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 6 +++--- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux.yml | 24 +++++++++++------------ .github/workflows/linux_blas.yml | 18 ++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 6 +++--- .github/workflows/windows.yml | 6 +++--- 15 files changed, 47 insertions(+), 47 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbf79b02142b..267d4c5b768f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index dc65449ba752..e91468b85068 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -26,7 +26,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -82,7 +82,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Trust working directory and initialize submodules run: 
| git config --global --add safe.directory /__w/numpy/numpy @@ -106,7 +106,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 174d04efb567..9d95da102fee 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -18,7 +18,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 5036a94ce399..158a826825f1 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 8269a8dcd705..a1d5b399988e 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -43,7 +43,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 7ac5ea673d61..40e372cbf3e4 100644 --- 
a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -33,7 +33,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-depth: 0 @@ -60,7 +60,7 @@ jobs: matrix: version: ["3.11", "3.12", "3.13", "3.14-dev", "3.14t-dev"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -75,7 +75,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -94,7 +94,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -124,7 +124,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -163,7 +163,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-22.04-arm steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -199,7 +199,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-22.04-arm steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -241,7 +241,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -280,7 +280,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -315,13 +315,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -353,7 +353,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 54d217cc12fb..633de1fbb84c 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -65,7 +65,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -163,7 +163,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -194,7 +194,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -223,7 +223,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -260,7 +260,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -285,7 +285,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive 
fetch-tags: true @@ -349,7 +349,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -386,7 +386,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 84e221439aa6..53780ae097a3 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -91,7 +91,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -198,7 +198,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a9f065e25cc0..dcc689739d20 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -58,7 +58,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -76,7 +76,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 
with: submodules: recursive fetch-tags: true @@ -123,7 +123,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -176,7 +176,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -191,7 +191,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -242,7 +242,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8931b76c5dee..da0972678b15 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -29,7 +29,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -118,7 +118,7 @@ jobs: version: ["3.11", "3.14t-dev"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 
81fa57239b9b..e362a29f628a 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -50,7 +50,7 @@ jobs: - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index bfbf34fa7817..544c568a522d 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -24,7 +24,7 @@ jobs: shard-index: [0] # e.g. change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: path: numpy_to_test fetch-depth: 0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cf637aa947a0..50fb7a12f432 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v3.1.0 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v3.1.0 with: persist-credentials: false diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 4fcd8a58f53a..e87af415d3c5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -49,7 +49,7 @@ jobs: message: ${{ steps.commit_message.outputs.message }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 # Gets the correct commit message for pull request with: ref: ${{ github.event.pull_request.head.sha }} @@ -115,7 +115,7 @@ jobs: IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 
'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true persist-credentials: false @@ -245,7 +245,7 @@ jobs: # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true persist-credentials: false diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e723b787a5de..dba4f74496b2 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -99,7 +99,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -173,7 +173,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true From 8276cb4cb3f3031bad510a776b6be8b3ffc1695f Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Tue, 12 Aug 2025 19:18:07 +0900 Subject: [PATCH 292/294] fix: File exists error --- tools/ci/check_c_api_usage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/check_c_api_usage.sh b/tools/ci/check_c_api_usage.sh index 9f4203284823..b04410d7667d 100644 --- a/tools/ci/check_c_api_usage.sh +++ 
b/tools/ci/check_c_api_usage.sh @@ -24,7 +24,7 @@ echo "Scanning $(echo "$ALL_FILES" | wc -l) C/C++ source files..." # Prepare a result file mkdir -p .tmp -OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX.txt) +OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX) echo -e "Running Suspicious C API usage report workflow...\n" > $OUTPUT FAIL=0 From bb198ee93eb15afd80b2706d1b9b483edaf4bf55 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 19:59:18 +0000 Subject: [PATCH 293/294] MAINT: Bump github/codeql-action from 3.29.8 to 3.29.9 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.8 to 3.29.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/76621b61decf072c1cee8dd1ce2d2a82d33c17ed...df559355d593797519d70b90fc8edd5db049e7a2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.9 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbf79b02142b..61d9c6ec2248 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/autobuild@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cf637aa947a0..50871457563e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v2.1.27 + uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v2.1.27 with: sarif_file: results.sarif From 7683d859f6e81735d66289bd1a8a3d3b84186d67 Mon Sep 17 00:00:00 2001 From: Leonardo Paredes <49594379+phdparedes@users.noreply.github.com> Date: Wed, 13 Aug 2025 07:25:51 -0700 Subject: [PATCH 294/294] BUG: allow `MaskedArray.fill_value` be a string when `dtype=StringDType` (#29423) * BUG: allow ma.MaskedArray.fill_value be a string when dtype=StringDType This fix adds the StringDType data type character code 'T' to the list of accepted data types that can use a 'fill_value' as string. Currently just accepts None. "See #29421" Issue. 
* TST: add tests for StringDType MaskedArray use * TST: add tests for fill_value in StringDType MaskedArray * TST: fix format in the test functions added * Add default fill value 'N/A' for StringDType * TST: Add tests for StringDType masked array default fill and slicing * DOC: add release note for StringDType fill_value support (gh-29423) --------- Co-authored-by: Nathan Goldbaum --- .../upcoming_changes/29423.new_feature.rst | 7 ++ numpy/ma/core.py | 26 ++++--- numpy/ma/tests/test_core.py | 77 +++++++++++++++++-- 3 files changed, 93 insertions(+), 17 deletions(-) create mode 100644 doc/release/upcoming_changes/29423.new_feature.rst diff --git a/doc/release/upcoming_changes/29423.new_feature.rst b/doc/release/upcoming_changes/29423.new_feature.rst new file mode 100644 index 000000000000..7e83604b0049 --- /dev/null +++ b/doc/release/upcoming_changes/29423.new_feature.rst @@ -0,0 +1,7 @@ +``StringDType`` fill_value support in `numpy.ma.MaskedArray` +------------------------------------------------------------ +Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` when +using the variable‑width ``StringDType`` (kind ``'T'``), including through slicing +and views. The default is ``'N/A'`` and may be overridden by any valid string. +This fixes issue `gh‑29421 `__ and was +implemented in pull request `gh‑29423 `__. diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 9b705460dd85..621dbd94640b 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -181,7 +181,8 @@ class MaskError(MAError): 'S': b'N/A', 'u': 999999, 'V': b'???', - 'U': 'N/A' + 'U': 'N/A', + 'T': 'N/A' } # Add datetime64 and timedelta64 types @@ -264,16 +265,17 @@ def default_fill_value(obj): The default filling value depends on the datatype of the input array or the type of the input scalar: - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' 
- string 'N/A' - ======== ======== + =========== ======== + datatype default + =========== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + StringDType 'N/A' + =========== ======== For structured types, a structured scalar is returned, with each field the default fill value for its type. @@ -498,7 +500,7 @@ def _check_fill_value(fill_value, ndtype): fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype) - elif isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + elif isinstance(fill_value, str) and (ndtype.char not in 'OSTVU'): # Note this check doesn't work if fill_value is not a scalar err_msg = "Cannot set fill value of string with array of dtype %s" raise TypeError(err_msg % ndtype) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 5327963999e0..bdef61dac3ba 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1810,7 +1810,7 @@ def test_eq_ne_structured_extra(self): el_by_el = [m1[name] != m2[name] for name in dt.names] assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_eq_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1842,7 +1842,7 @@ def test_eq_for_strings(self, dt, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_ne_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1992,15 +1992,23 @@ def test_comparisons_for_numeric(self, op, dt1, dt2, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) 
@pytest.mark.parametrize('op', [operator.le, operator.lt, operator.ge, operator.gt]) @pytest.mark.parametrize('fill', [None, "N/A"]) - def test_comparisons_strings(self, op, fill): + def test_comparisons_strings(self, dt, op, fill): # See gh-21770, mask propagation is broken for strings (and some other # cases) so we explicitly test strings here. # In principle only == and != may need special handling... - ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) - ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + if isinstance(fill, str): + fill = np.array(fill, dtype=dt) + + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") @@ -5692,6 +5700,65 @@ def test_default_fill_value_complex(): assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) +def test_string_dtype_fill_value_on_construction(): + # Regression test for gh-29421: allow string fill_value on StringDType masked arrays + dt = np.dtypes.StringDType() + data = np.array(["A", "test", "variable", ""], dtype=dt) + mask = [True, False, True, True] + # Prior to the fix, this would TypeError; now it should succeed + arr = np.ma.MaskedArray(data, mask=mask, fill_value="FILL", dtype=dt) + assert isinstance(arr.fill_value, str) + assert arr.fill_value == "FILL" + filled = arr.filled() + # Masked positions should be replaced by 'FILL' + assert filled.tolist() == ["FILL", "test", "FILL", "FILL"] + + +def test_string_dtype_default_fill_value(): + # Regression test for gh-29421: default fill_value for StringDType is 
'N/A' + dt = np.dtypes.StringDType() + data = np.array(['x', 'y', 'z'], dtype=dt) + # no fill_value passed → uses default_fill_value internally + arr = np.ma.MaskedArray(data, mask=[True, False, True], dtype=dt) + # ensure it’s stored as a Python str and equals the expected default + assert isinstance(arr.fill_value, str) + assert arr.fill_value == 'N/A' + # masked slots should be replaced by that default + assert arr.filled().tolist() == ['N/A', 'y', 'N/A'] + + +def test_string_dtype_fill_value_persists_through_slice(): + # Regression test for gh-29421: .fill_value survives slicing/viewing + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ['a', 'b', 'c'], + mask=[True, False, True], + dtype=dt + ) + arr.fill_value = 'Z' + # slice triggers __array_finalize__ + sub = arr[1:] + # the slice should carry the same fill_value and behavior + assert isinstance(sub.fill_value, str) + assert sub.fill_value == 'Z' + assert sub.filled().tolist() == ['b', 'Z'] + + +def test_setting_fill_value_attribute(): + # Regression test for gh-29421: setting .fill_value post-construction works too + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ["x", "longstring", "mid"], mask=[False, True, False], dtype=dt + ) + # Setting the attribute should not raise + arr.fill_value = "Z" + assert arr.fill_value == "Z" + # And filled() should use the new fill_value + assert arr.filled()[0] == "x" + assert arr.filled()[1] == "Z" + assert arr.filled()[2] == "mid" + + def test_ufunc_with_output(): # check that giving an output argument always returns that output. # Regression test for gh-8416.