diff --git a/.venv/bin/f2py b/.venv/bin/f2py
new file mode 100755
index 00000000..d9ecae3e
--- /dev/null
+++ b/.venv/bin/f2py
@@ -0,0 +1,8 @@
+#!/home/mgph/Desktop/?/MAESTRIA/HYDROGEN_PROJ/Analysis_Data/py-data-analysis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from numpy.f2py.f2py2e import main
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/.venv/bin/numpy-config b/.venv/bin/numpy-config
new file mode 100755
index 00000000..52b3acbf
--- /dev/null
+++ b/.venv/bin/numpy-config
@@ -0,0 +1,8 @@
+#!/home/mgph/Desktop/?/MAESTRIA/HYDROGEN_PROJ/Analysis_Data/py-data-analysis/.venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+from numpy._configtool import main
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/LICENSE.txt b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/LICENSE.txt
new file mode 100644
index 00000000..284458b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/LICENSE.txt
@@ -0,0 +1,971 @@
+Copyright (c) 2005-2025, NumPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NumPy Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----
+
+The NumPy repository and source distributions bundle several libraries that are
+compatibly licensed. We list these here.
+
+Name: lapack-lite
+Files: numpy/linalg/lapack_lite/*
+License: BSD-3-Clause
+ For details, see numpy/linalg/lapack_lite/LICENSE.txt
+
+Name: dragon4
+Files: numpy/_core/src/multiarray/dragon4.c
+License: MIT
+ For license text, see numpy/_core/src/multiarray/dragon4.c
+
+Name: libdivide
+Files: numpy/_core/include/numpy/libdivide/*
+License: Zlib
+ For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt
+
+
+Note that the following files are vendored in the repository and sdist but not
+installed in built numpy packages:
+
+Name: Meson
+Files: vendored-meson/meson/*
+License: Apache 2.0
+ For license text, see vendored-meson/meson/COPYING
+
+Name: spin
+Files: .spin/cmds.py
+License: BSD-3
+ For license text, see .spin/LICENSE
+
+Name: tempita
+Files: numpy/_build_utils/tempita/*
+License: MIT
+ For details, see numpy/_build_utils/tempita/LICENCE.txt
+
+----
+
+This binary distribution of NumPy also bundles the following software:
+
+
+Name: OpenBLAS
+Files: numpy.libs/libscipy_openblas*.so
+Description: bundled as a dynamically linked library
+Availability: https://github.com/OpenMathLib/OpenBLAS/
+License: BSD-3-Clause
+ Copyright (c) 2011-2014, The OpenBLAS Project
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ 3. Neither the name of the OpenBLAS project nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: LAPACK
+Files: numpy.libs/libscipy_openblas*.so
+Description: bundled in OpenBLAS
+Availability: https://github.com/OpenMathLib/OpenBLAS/
+License: BSD-3-Clause-Open-MPI
+ Copyright (c) 1992-2013 The University of Tennessee and The University
+ of Tennessee Research Foundation. All rights
+ reserved.
+ Copyright (c) 2000-2013 The University of California Berkeley. All
+ rights reserved.
+ Copyright (c) 2006-2013 The University of Colorado Denver. All rights
+ reserved.
+
+ $COPYRIGHT$
+
+ Additional copyrights may follow
+
+ $HEADER$
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+ - Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ The copyright holders provide no reassurances that the source code
+ provided does not infringe any patent, copyright, or any other
+ intellectual property rights of third parties. The copyright holders
+ disclaim any liability to any recipient for claims brought against
+ recipient by any third party for infringement of that parties
+ intellectual property rights.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: GCC runtime library
+Files: numpy.libs/libgfortran*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
+License: GPL-3.0-or-later WITH GCC-exception-3.1
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+    <http://www.gnu.org/licenses/>.
+
+----
+
+Full text of license texts referred to above follows (that they are
+listed below does not necessarily imply the conditions apply to the
+present binary release):
+
+----
+
+GCC RUNTIME LIBRARY EXCEPTION
+
+Version 3.1, 31 March 2009
+
+Copyright (C) 2009 Free Software Foundation, Inc.
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+This GCC Runtime Library Exception ("Exception") is an additional
+permission under section 7 of the GNU General Public License, version
+3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+bears a notice placed by the copyright holder of the file stating that
+the file is governed by GPLv3 along with this Exception.
+
+When you use GCC to compile a program, GCC may combine portions of
+certain GCC header files and runtime libraries with the compiled
+program. The purpose of this Exception is to allow compilation of
+non-GPL (including proprietary) programs to use, in this way, the
+header files and runtime libraries covered by this Exception.
+
+0. Definitions.
+
+A file is an "Independent Module" if it either requires the Runtime
+Library for execution after a Compilation Process, or makes use of an
+interface provided by the Runtime Library, but is not otherwise based
+on the Runtime Library.
+
+"GCC" means a version of the GNU Compiler Collection, with or without
+modifications, governed by version 3 (or a specified later version) of
+the GNU General Public License (GPL) with the option of using any
+subsequent versions published by the FSF.
+
+"GPL-compatible Software" is software whose conditions of propagation,
+modification and use would permit combination with GCC in accord with
+the license of GCC.
+
+"Target Code" refers to output from any compiler for a real or virtual
+target processor architecture, in executable form or suitable for
+input to an assembler, loader, linker and/or execution
+phase. Notwithstanding that, Target Code does not include data in any
+format that is used as a compiler intermediate representation, or used
+for producing a compiler intermediate representation.
+
+The "Compilation Process" transforms code entirely represented in
+non-intermediate languages designed for human-written code, and/or in
+Java Virtual Machine byte code, into Target Code. Thus, for example,
+use of source code generators and preprocessors need not be considered
+part of the Compilation Process, since the Compilation Process can be
+understood as starting with the output of the generators or
+preprocessors.
+
+A Compilation Process is "Eligible" if it is done using GCC, alone or
+with other GPL-compatible software, or if it is done without using any
+work based on GCC. For example, using non-GPL-compatible Software to
+optimize any GCC intermediate representations would not qualify as an
+Eligible Compilation Process.
+
+1. Grant of Additional Permission.
+
+You have permission to propagate a work of Target Code formed by
+combining the Runtime Library with Independent Modules, even if such
+propagation would otherwise violate the terms of GPLv3, provided that
+all Target Code was generated by Eligible Compilation Processes. You
+may then convey such a combination under terms of your choice,
+consistent with the licensing of the Independent Modules.
+
+2. No Weakening of GCC Copyleft.
+
+The availability of this Exception does not imply any general
+presumption that third-party software is unaffected by the copyleft
+requirements of the license of GCC.
+
+----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
+Name: libquadmath
+Files: numpy.libs/libquadmath*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+License: LGPL-2.1-or-later
+
+ GCC Quad-Precision Math Library
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
+    Written by Francois-Xavier Coudert <fxcoudert@acm.org>
+
+ This file is part of the libquadmath library.
+ Libquadmath is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ Libquadmath is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/METADATA b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/METADATA
new file mode 100644
index 00000000..f9788044
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/METADATA
@@ -0,0 +1,1093 @@
+Metadata-Version: 2.1
+Name: numpy
+Version: 2.3.3
+Summary: Fundamental package for array computing in Python
+Author: Travis E. Oliphant et al.
+Maintainer-Email: NumPy Developers <numpy-discussion@python.org>
+License: Copyright (c) 2005-2025, NumPy Developers.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NumPy Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----
+
+ The NumPy repository and source distributions bundle several libraries that are
+ compatibly licensed. We list these here.
+
+ Name: lapack-lite
+ Files: numpy/linalg/lapack_lite/*
+ License: BSD-3-Clause
+ For details, see numpy/linalg/lapack_lite/LICENSE.txt
+
+ Name: dragon4
+ Files: numpy/_core/src/multiarray/dragon4.c
+ License: MIT
+ For license text, see numpy/_core/src/multiarray/dragon4.c
+
+ Name: libdivide
+ Files: numpy/_core/include/numpy/libdivide/*
+ License: Zlib
+ For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt
+
+
+ Note that the following files are vendored in the repository and sdist but not
+ installed in built numpy packages:
+
+ Name: Meson
+ Files: vendored-meson/meson/*
+ License: Apache 2.0
+ For license text, see vendored-meson/meson/COPYING
+
+ Name: spin
+ Files: .spin/cmds.py
+ License: BSD-3
+ For license text, see .spin/LICENSE
+
+ Name: tempita
+ Files: numpy/_build_utils/tempita/*
+ License: MIT
+ For details, see numpy/_build_utils/tempita/LICENCE.txt
+
+ ----
+
+ This binary distribution of NumPy also bundles the following software:
+
+
+ Name: OpenBLAS
+ Files: numpy.libs/libscipy_openblas*.so
+ Description: bundled as a dynamically linked library
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
+ License: BSD-3-Clause
+ Copyright (c) 2011-2014, The OpenBLAS Project
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ 3. Neither the name of the OpenBLAS project nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ Name: LAPACK
+ Files: numpy.libs/libscipy_openblas*.so
+ Description: bundled in OpenBLAS
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
+ License: BSD-3-Clause-Open-MPI
+ Copyright (c) 1992-2013 The University of Tennessee and The University
+ of Tennessee Research Foundation. All rights
+ reserved.
+ Copyright (c) 2000-2013 The University of California Berkeley. All
+ rights reserved.
+ Copyright (c) 2006-2013 The University of Colorado Denver. All rights
+ reserved.
+
+ $COPYRIGHT$
+
+ Additional copyrights may follow
+
+ $HEADER$
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+ - Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ The copyright holders provide no reassurances that the source code
+ provided does not infringe any patent, copyright, or any other
+ intellectual property rights of third parties. The copyright holders
+ disclaim any liability to any recipient for claims brought against
+ recipient by any third party for infringement of that parties
+ intellectual property rights.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ Name: GCC runtime library
+ Files: numpy.libs/libgfortran*.so
+ Description: dynamically linked to files compiled with gcc
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
+ License: GPL-3.0-or-later WITH GCC-exception-3.1
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <https://www.gnu.org/licenses/>.
+
+ ----
+
+ The full text of the licenses referred to above follows (their inclusion
+ below does not necessarily imply that their conditions apply to the
+ present binary release):
+
+ ----
+
+ GCC RUNTIME LIBRARY EXCEPTION
+
+ Version 3.1, 31 March 2009
+
+ Copyright (C) 2009 Free Software Foundation, Inc. <https://fsf.org/>
+
+ Everyone is permitted to copy and distribute verbatim copies of this
+ license document, but changing it is not allowed.
+
+ This GCC Runtime Library Exception ("Exception") is an additional
+ permission under section 7 of the GNU General Public License, version
+ 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+ bears a notice placed by the copyright holder of the file stating that
+ the file is governed by GPLv3 along with this Exception.
+
+ When you use GCC to compile a program, GCC may combine portions of
+ certain GCC header files and runtime libraries with the compiled
+ program. The purpose of this Exception is to allow compilation of
+ non-GPL (including proprietary) programs to use, in this way, the
+ header files and runtime libraries covered by this Exception.
+
+ 0. Definitions.
+
+ A file is an "Independent Module" if it either requires the Runtime
+ Library for execution after a Compilation Process, or makes use of an
+ interface provided by the Runtime Library, but is not otherwise based
+ on the Runtime Library.
+
+ "GCC" means a version of the GNU Compiler Collection, with or without
+ modifications, governed by version 3 (or a specified later version) of
+ the GNU General Public License (GPL) with the option of using any
+ subsequent versions published by the FSF.
+
+ "GPL-compatible Software" is software whose conditions of propagation,
+ modification and use would permit combination with GCC in accord with
+ the license of GCC.
+
+ "Target Code" refers to output from any compiler for a real or virtual
+ target processor architecture, in executable form or suitable for
+ input to an assembler, loader, linker and/or execution
+ phase. Notwithstanding that, Target Code does not include data in any
+ format that is used as a compiler intermediate representation, or used
+ for producing a compiler intermediate representation.
+
+ The "Compilation Process" transforms code entirely represented in
+ non-intermediate languages designed for human-written code, and/or in
+ Java Virtual Machine byte code, into Target Code. Thus, for example,
+ use of source code generators and preprocessors need not be considered
+ part of the Compilation Process, since the Compilation Process can be
+ understood as starting with the output of the generators or
+ preprocessors.
+
+ A Compilation Process is "Eligible" if it is done using GCC, alone or
+ with other GPL-compatible software, or if it is done without using any
+ work based on GCC. For example, using non-GPL-compatible Software to
+ optimize any GCC intermediate representations would not qualify as an
+ Eligible Compilation Process.
+
+ 1. Grant of Additional Permission.
+
+ You have permission to propagate a work of Target Code formed by
+ combining the Runtime Library with Independent Modules, even if such
+ propagation would otherwise violate the terms of GPLv3, provided that
+ all Target Code was generated by Eligible Compilation Processes. You
+ may then convey such a combination under terms of your choice,
+ consistent with the licensing of the Independent Modules.
+
+ 2. No Weakening of GCC Copyleft.
+
+ The availability of this Exception does not imply any general
+ presumption that third-party software is unaffected by the copyleft
+ requirements of the license of GCC.
+
+ ----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+ software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ the GNU General Public License is intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users. We, the Free Software Foundation, use the
+ GNU General Public License for most of our software; it applies also to
+ any other work released this way by its authors. You can apply it to
+ your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+ these rights or asking you to surrender the rights. Therefore, you have
+ certain responsibilities if you distribute copies of the software, or if
+ you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+ gratis or for a fee, you must pass on to the recipients the same
+ freedoms that you received. You must make sure that they, too, receive
+ or can get the source code. And you must show them these terms so they
+ know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+ (1) assert copyright on the software, and (2) offer you this License
+ giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+ that there is no warranty for this free software. For both users' and
+ authors' sake, the GPL requires that modified versions be marked as
+ changed, so that their problems will not be attributed erroneously to
+ authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+ modified versions of the software inside them, although the manufacturer
+ can do so. This is fundamentally incompatible with the aim of
+ protecting users' freedom to change the software. The systematic
+ pattern of such abuse occurs in the area of products for individuals to
+ use, which is precisely where it is most unacceptable. Therefore, we
+ have designed this version of the GPL to prohibit the practice for those
+ products. If such problems arise substantially in other domains, we
+ stand ready to extend this provision to those domains in future versions
+ of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+ States should not allow patents to restrict development and use of
+ software on general-purpose computers, but in those that do, we wish to
+ avoid the special danger that patents applied to a free program could
+ make it effectively proprietary. To prevent this, the GPL assures that
+ patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU Affero General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the special requirements of the GNU Affero General Public License,
+ section 13, concerning interaction through a network will apply to the
+ combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+ the GNU General Public License from time to time. Such new versions will
+ be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+ versions of the GNU General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+ Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+ notice like this when it starts in an interactive mode:
+
+ <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+ The hypothetical commands `show w' and `show c' should show the appropriate
+ parts of the General Public License. Of course, your program's commands
+ might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU GPL, see
+ <https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+ into proprietary programs. If your program is a subroutine library, you
+ may consider it more useful to permit linking proprietary applications with
+ the library. If this is what you want to do, use the GNU Lesser General
+ Public License instead of this License. But first, please read
+ <https://www.gnu.org/philosophy/why-not-lgpl.html>.
+
+ Name: libquadmath
+ Files: numpy.libs/libquadmath*.so
+ Description: dynamically linked to files compiled with gcc
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+ License: LGPL-2.1-or-later
+
+ GCC Quad-Precision Math Library
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
+ Written by Francois-Xavier Coudert
+
+ This file is part of the libquadmath library.
+ Libquadmath is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ Libquadmath is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
+
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Software Development
+Classifier: Topic :: Scientific/Engineering
+Classifier: Typing :: Typed
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Operating System :: MacOS
+Project-URL: homepage, https://numpy.org
+Project-URL: documentation, https://numpy.org/doc/
+Project-URL: source, https://github.com/numpy/numpy
+Project-URL: download, https://pypi.org/project/numpy/#files
+Project-URL: tracker, https://github.com/numpy/numpy/issues
+Project-URL: release notes, https://numpy.org/doc/stable/release
+Requires-Python: >=3.11
+Description-Content-Type: text/markdown
+
+[NumFOCUS](https://numfocus.org)
+[PyPI](https://pypi.org/project/numpy/)
+[conda-forge](https://anaconda.org/conda-forge/numpy)
+[Stack Overflow](https://stackoverflow.com/questions/tagged/numpy)
+[DOI 10.1038/s41586-020-2649-2](https://doi.org/10.1038/s41586-020-2649-2)
+[OpenSSF Scorecard](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy)
+
+
+NumPy is the fundamental package for scientific computing with Python.
+
+- **Website:** https://numpy.org
+- **Documentation:** https://numpy.org/doc
+- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion
+- **Source code:** https://github.com/numpy/numpy
+- **Contributing:** https://numpy.org/devdocs/dev/index.html
+- **Bug reports:** https://github.com/numpy/numpy/issues
+- **Report a security vulnerability:** https://tidelift.com/docs/security
+
+It provides (see the short sketch after this list):
+
+- a powerful N-dimensional array object
+- sophisticated (broadcasting) functions
+- tools for integrating C/C++ and Fortran code
+- useful linear algebra, Fourier transform, and random number capabilities
+
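+A minimal sketch of the features above (illustrative only, not taken from the
+upstream README):
+
+    import numpy as np
+
+    a = np.arange(6).reshape(3, 2)             # 3x2 integer array
+    col = np.array([[10.0], [20.0], [30.0]])   # 3x1 column
+    b = a + col                                # broadcasting gives a 3x2 result
+
+    x = np.linalg.solve(np.eye(2), np.ones(2)) # linear algebra: solve I @ x = 1
+    spectrum = np.fft.fft(b.ravel())           # discrete Fourier transform
+    rng = np.random.default_rng(seed=0)        # seeded random Generator
+    noise = rng.normal(size=b.shape)           # draw Gaussian samples
+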
+Testing:
+
+NumPy's test suite requires `pytest` and `hypothesis`. Tests can then be run against an installed NumPy with:
+
+ python -c "import numpy, sys; sys.exit(numpy.test() is False)"
+
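+The test dependencies can be installed up front, and the suite can also be
+invoked through `pytest` directly (a sketch of one common setup; adjust to
+your environment):
+
+    pip install pytest hypothesis
+    python -m pytest --pyargs numpy   # run the installed package's tests
+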
+Code of Conduct
+----------------------
+
+NumPy is a community-driven open source project developed by a diverse group of
+[contributors](https://numpy.org/teams/). The NumPy leadership has made a strong
+commitment to creating an open, inclusive, and positive community. Please read the
+[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact
+with others in a way that makes our community thrive.
+
+Call for Contributions
+----------------------
+
+The NumPy project welcomes your expertise and enthusiasm!
+
+Small improvements or fixes are always appreciated. If you are considering larger contributions
+to the source code, please contact us through the [mailing
+list](https://mail.python.org/mailman/listinfo/numpy-discussion) first.
+
+Writing code isn’t the only way to contribute to NumPy. You can also:
+- review pull requests
+- help us stay on top of new and old issues
+- develop tutorials, presentations, and other educational materials
+- maintain and improve [our website](https://github.com/numpy/numpy.org)
+- develop graphic design for our brand assets and promotional materials
+- translate website content
+- help with outreach and onboard new contributors
+- write grant proposals and help with other fundraising efforts
+
+For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/).
+If you’re unsure where to start or how your skills fit in, reach out! You can
+ask on the mailing list or here, on GitHub, by opening a new issue or leaving a
+comment on a relevant issue that is already open.
+
+Our preferred channels of communication are all public, but if you’d like to
+speak to us in private first, contact our community coordinators at
+numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for
+an invitation).
+
+We also have a biweekly community call, details of which are announced on the
+mailing list. You are very welcome to join.
+
+If you are new to contributing to open source, [this
+guide](https://opensource.guide/how-to-contribute/) helps explain why, what,
+and how to successfully get involved.
diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/RECORD b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/RECORD
new file mode 100644
index 00000000..e17eb1c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/RECORD
@@ -0,0 +1,1313 @@
+../../../bin/f2py,sha256=FMnTR3SoQInWa0YtxP1-1nIMkQBre2Z3fp_LdQugdcU,291
+../../../bin/numpy-config,sha256=LX9pSNBRQfttj1LTyqktki_Q-ETewQDZO_g2DoiI4DA,291
+numpy-2.3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+numpy-2.3.3.dist-info/LICENSE.txt,sha256=IEajEw5QsRwBZZs6DZY-auC3Q2_46Jy8_Z6HvGES1ZU,47768
+numpy-2.3.3.dist-info/METADATA,sha256=Zh7Sk4Ex4WGl9yMJnHI25wnnBd7IVtHvN1J9N9CsTcY,62117
+numpy-2.3.3.dist-info/RECORD,,
+numpy-2.3.3.dist-info/WHEEL,sha256=rJCplGeGFjRoUvxJWhVONRgyGYGZoOkhyej0s3KyjoI,138
+numpy-2.3.3.dist-info/entry_points.txt,sha256=7Cb63gyL2sIRpsHdADpl6xaIW5JTlUI-k_yqEVr0BSw,220
+numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0,sha256=xgkASOzMdjUiwS7wFvgdprYnyzoET1XPBHmoOcQcCYA,2833617
+numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0,sha256=btUTf0Enga14Y0OftUNhP2ILQ8MrYykqACkkYWL1u8Y,250985
+numpy.libs/libscipy_openblas64_-8fb3d286.so,sha256=N7prNzoi_0KETgFPXcu0KBg_IyX9mTNhtB_M9oJBMz0,25050385
+numpy/__config__.py,sha256=y4ewV7-YUXH7wquW-VHV-E1dVrJRPCESUK76xy73RJA,5281
+numpy/__config__.pyi,sha256=7nE-kUNs2lWPIpofTastbf2PCMgCka7FCiK5jrFkDYE,2367
+numpy/__init__.cython-30.pxd,sha256=qT7d9_TWkj4UsfpY1uaBUmcYflptcjZfDGZsYJth8rU,47123
+numpy/__init__.pxd,sha256=BFYYkcQUcrl0Ee8ReoQiA0wgtxsWeIGovC8jYeEw5qg,43758
+numpy/__init__.py,sha256=gjanU4Bds0wp75zQNpTgr4g7YyXrT9JGzIPSvguEfok,25226
+numpy/__init__.pyi,sha256=lFZru8V_KWUuK-oD5tgqCNy1pJSZ-aKyJASjncTlRGo,219154
+numpy/__pycache__/__config__.cpython-312.pyc,,
+numpy/__pycache__/__init__.cpython-312.pyc,,
+numpy/__pycache__/_array_api_info.cpython-312.pyc,,
+numpy/__pycache__/_configtool.cpython-312.pyc,,
+numpy/__pycache__/_distributor_init.cpython-312.pyc,,
+numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc,,
+numpy/__pycache__/_globals.cpython-312.pyc,,
+numpy/__pycache__/_pytesttester.cpython-312.pyc,,
+numpy/__pycache__/conftest.cpython-312.pyc,,
+numpy/__pycache__/dtypes.cpython-312.pyc,,
+numpy/__pycache__/exceptions.cpython-312.pyc,,
+numpy/__pycache__/matlib.cpython-312.pyc,,
+numpy/__pycache__/version.cpython-312.pyc,,
+numpy/_array_api_info.py,sha256=NzJSuf8vutjGSqiqahq3jRI3SxMX4X1cva4J6dFv4EU,10354
+numpy/_array_api_info.pyi,sha256=QP_tYDbjtTOPtJECk3ehRXOQ24QM8TZjAfWX8XAsZCM,4864
+numpy/_configtool.py,sha256=EFRJ3pazTxYhE9op-ocWyKTLZrrpFhfmmS_tWrq8Cxo,1007
+numpy/_configtool.pyi,sha256=d4f22QGwpb1ZtDk-1Sn72ftvo4incC5E2JAikmjzfJI,24
+numpy/_core/__init__.py,sha256=yJ0iy1fXk9ogCFnflCWzBBLwlKSS-xlQWCpWCozaT6c,5542
+numpy/_core/__init__.pyi,sha256=Mj2I4BtqBVNUZVs5o1T58Z7wSaWjfhX0nCl-a0ULjgA,86
+numpy/_core/__pycache__/__init__.cpython-312.pyc,,
+numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc,,
+numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc,,
+numpy/_core/__pycache__/_asarray.cpython-312.pyc,,
+numpy/_core/__pycache__/_dtype.cpython-312.pyc,,
+numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc,,
+numpy/_core/__pycache__/_exceptions.cpython-312.pyc,,
+numpy/_core/__pycache__/_internal.cpython-312.pyc,,
+numpy/_core/__pycache__/_machar.cpython-312.pyc,,
+numpy/_core/__pycache__/_methods.cpython-312.pyc,,
+numpy/_core/__pycache__/_string_helpers.cpython-312.pyc,,
+numpy/_core/__pycache__/_type_aliases.cpython-312.pyc,,
+numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc,,
+numpy/_core/__pycache__/arrayprint.cpython-312.pyc,,
+numpy/_core/__pycache__/cversions.cpython-312.pyc,,
+numpy/_core/__pycache__/defchararray.cpython-312.pyc,,
+numpy/_core/__pycache__/einsumfunc.cpython-312.pyc,,
+numpy/_core/__pycache__/fromnumeric.cpython-312.pyc,,
+numpy/_core/__pycache__/function_base.cpython-312.pyc,,
+numpy/_core/__pycache__/getlimits.cpython-312.pyc,,
+numpy/_core/__pycache__/memmap.cpython-312.pyc,,
+numpy/_core/__pycache__/multiarray.cpython-312.pyc,,
+numpy/_core/__pycache__/numeric.cpython-312.pyc,,
+numpy/_core/__pycache__/numerictypes.cpython-312.pyc,,
+numpy/_core/__pycache__/overrides.cpython-312.pyc,,
+numpy/_core/__pycache__/printoptions.cpython-312.pyc,,
+numpy/_core/__pycache__/records.cpython-312.pyc,,
+numpy/_core/__pycache__/shape_base.cpython-312.pyc,,
+numpy/_core/__pycache__/strings.cpython-312.pyc,,
+numpy/_core/__pycache__/umath.cpython-312.pyc,,
+numpy/_core/_add_newdocs.py,sha256=ySKuP_4sVPNLHp1ojgTMhSRWi3d18CcBFHFHkD8Xf-U,208893
+numpy/_core/_add_newdocs.pyi,sha256=r__d_-GHkfjzuZ0qyjDztsKgdc1eIyeN-cBoYVgMBuo,168
+numpy/_core/_add_newdocs_scalars.py,sha256=Z5WcIAXy2Vs8kWLCzgyvxWVH0CAl-O64YFK3ttbU7yc,12600
+numpy/_core/_add_newdocs_scalars.pyi,sha256=ZnIk0TgL0szrv6SPCH-4dF469Q_92UvV5_ek47Oj7HM,573
+numpy/_core/_asarray.py,sha256=fCNHLaaCP-5Ia-RR_bIrHxWY3xklcmvlZiGhJIDiKLM,3911
+numpy/_core/_asarray.pyi,sha256=QHyb8DM_9U0otRugoNIyKjtvTVS3dZLn6DSxGi_ZU4U,1073
+numpy/_core/_dtype.py,sha256=cM6JnjoHLURWCHgN8VmQyjeiiDjcwhB5L_fPMOe1uuM,10547
+numpy/_core/_dtype.pyi,sha256=turm6RyVVEGKm6antqWWnyA0bnS2AuMwmKeFj-9mYHA,1851
+numpy/_core/_dtype_ctypes.py,sha256=KPPlakDsPkuThSOr5qFwW0jJ9VnjbvW4EWhObCHYGIE,3726
+numpy/_core/_dtype_ctypes.pyi,sha256=VwEZFViCPuHlCURv2jpJp9sbHh2hYUpzC_FRZNNGMMw,3682
+numpy/_core/_exceptions.py,sha256=X8Eg1hq1uU8L9wiOwFo2jRq6S0vnjCdgYFHj3hAW9Co,5159
+numpy/_core/_exceptions.pyi,sha256=ESXpijoEK0HrPy0dQYtjO62-Krd0419WLlrDROqwTyU,1900
+numpy/_core/_internal.py,sha256=YZ6nMGVOvfTD1nzk2XqRdz8k05WVnYGiljb1TnHvMq8,28981
+numpy/_core/_internal.pyi,sha256=2V2rXMQocZZHw8z_9HSrUi3LNGxaxA1nm0B0fcofjU8,2654
+numpy/_core/_machar.py,sha256=YUX24XYbxXJ79KrWar27FlDYKfeodr_RCkE7w0bETqs,11569
+numpy/_core/_machar.pyi,sha256=ESXpijoEK0HrPy0dQYtjO62-Krd0419WLlrDROqwTyU,1900
+numpy/_core/_methods.py,sha256=4qiUUES5wnOFeXnPavtqqMVhZ09ZZeSKlwqdPw2eKSI,9430
+numpy/_core/_methods.pyi,sha256=5HzEt2Z0-vxQfS1QJKDlTvNyLXcinNsja-xQiehMGbw,526
+numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so,sha256=-uOqys8wnCdsxMjL3BNzURd2PxxfZSy0i7l-zJwbndg,141888
+numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so,sha256=O1JIY1ShYFehh2iXHmUi3baCZRoigYbS-wXkNYzEdN4,10808937
+numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so,sha256=LaAKYVcg0fwPuQL89z8q1-GrpQykaaNK4kJuXRArmDs,16800
+numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so,sha256=L-0ufvXmFouELpWO_1IJOyZp7m1EiznGwGSZC3lGRVA,59592
+numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so,sha256=NGUI17JKGWykF1UY3TsJruMrbH-YCfePwDFQRDbA0_c,2882368
+numpy/_core/_simd.pyi,sha256=2z2sFPgXr3KRzHltbt31HVrhkXM0VwXFp1lUjxaRMAM,669
+numpy/_core/_string_helpers.py,sha256=6Smgoi6oD2CunjwBSr9BZ20HkCnvW6nTPblTOU3pWng,2845
+numpy/_core/_string_helpers.pyi,sha256=xLlLKJHutEYzyKnTG2k7clcWvVUTvD319SjnKmDXuac,358
+numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so,sha256=U__bzTJORJ4S1Ax393Y1ui1vpwNmQKJzNAIkICwK-hk,16936
+numpy/_core/_type_aliases.py,sha256=msFHBkZ2s1wKQyuguK_cF6NBS0_3AOww7j3oh26mo3Q,3489
+numpy/_core/_type_aliases.pyi,sha256=Tn1Ex4bAGQa1HuMx0Vn-tEBl3HDF_uesTzmiSrz81kQ,2388
+numpy/_core/_ufunc_config.py,sha256=hVIyOmLjFYdZQY5plKWuOMk-U7UzeYSEo4ygiXOFcBU,15052
+numpy/_core/_ufunc_config.pyi,sha256=rh1jhYnkafjGvrc3ytC5mOSwRnjwhoggw8yDeLCS3jc,972
+numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so,sha256=7Q7bbK-i26WJdhCDw-22TnLzD9gWwyqa93FEClt5M7A,50312
+numpy/_core/arrayprint.py,sha256=AAAvkrI0U6Pa_wZOnpuVZBpdsCCjpYpcWF8sA_SPYbg,65278
+numpy/_core/arrayprint.pyi,sha256=ogMYnp2ipEfagADzRaRK9ySGAfH_oabGNJegiA6LicY,6971
+numpy/_core/cversions.py,sha256=H_iNIpx9-hY1cQNxqjT2d_5SXZhJbMo_caq4_q6LB7I,347
+numpy/_core/defchararray.py,sha256=1tSvLWEeac20DodpDBxapJKwwczpJG1lVy2qjScIVXg,38007
+numpy/_core/defchararray.pyi,sha256=Mq-ytnNliY2jEYAl_0l5ZTRx9IpNMaJpDmkoerRUILE,27985
+numpy/_core/einsumfunc.py,sha256=heFeCiEKji-qfVk8zAZ1b5bKm-MUMLzCETMQ7yyHBhc,52820
+numpy/_core/einsumfunc.pyi,sha256=b10CKdAeLEryabwRMdiW1cKdNyqWLa5kMV7O2_X8g3A,4893
+numpy/_core/fromnumeric.py,sha256=s0f6WfkIRVwFZMlDrdYb3EjyF9vMGr0bms0Pc-VcOAM,143882
+numpy/_core/fromnumeric.pyi,sha256=VoUF-d31OuZYaRIi-duoYAABOADe4KjbBhFFx3Hd_Mc,42034
+numpy/_core/function_base.py,sha256=QT1pbll_8rf_3ZsGtLQoAeQ1OSqCqeAGtMTzPAE1I_w,19683
+numpy/_core/function_base.pyi,sha256=A9BlWQeiX08iIwDQJ6W1FUhy2qrRPVenXtHiEnPkt0k,7064
+numpy/_core/getlimits.py,sha256=32Qe7tlBFdyiDvdSjG1cp2a0NJ0rSMxeDRij3agiPrg,26101
+numpy/_core/getlimits.pyi,sha256=q30hQ3wDenmxoZUSoSOqyVrZZVGlsixXCHe6QUthbp8,61
+numpy/_core/include/numpy/__multiarray_api.c,sha256=ndBF5wbdd7F8_zWvR52MDO0Qm15_PrCCBlSk4dky4F8,12698
+numpy/_core/include/numpy/__multiarray_api.h,sha256=6ep4M4s0Cxoj4DgJGns-0___TdSqDJoUPnZr0BBYwkU,61639
+numpy/_core/include/numpy/__ufunc_api.c,sha256=Fg7WlH4Ow6jETKRArVL_QF11ABKYz1VpOve56_U3E0w,1755
+numpy/_core/include/numpy/__ufunc_api.h,sha256=J5h9KHdntM27XQdq1PwHwI7V2v-sOx6AIbgCwP8mg9M,13175
+numpy/_core/include/numpy/_neighborhood_iterator_imp.h,sha256=s-Hw_l5WRwKtYvsiIghF0bg-mA_CgWnzFFOYVFJ-q4k,1857
+numpy/_core/include/numpy/_numpyconfig.h,sha256=lfgEF_31SixqOweZEHjn19bN5ng62MSwuVWEXS1_p_U,926
+numpy/_core/include/numpy/_public_dtype_api_table.h,sha256=n6_Kb98SyvsR_X7stiNA6VuGp_c5W1e4fMVcJdO0wis,4574
+numpy/_core/include/numpy/arrayobject.h,sha256=mU5vpcQ95PH1j3bp8KYhJOFHB-GxwRjSUsR7nxlTSRk,204
+numpy/_core/include/numpy/arrayscalars.h,sha256=LlyrZIa_5td11BfqfMCv1hYbiG6__zxxGv1MRj8uIVo,4243
+numpy/_core/include/numpy/dtype_api.h,sha256=Gn37RzObmcTsL6YUYY9aG22Ct8F-r4ZaC53NPFqaIso,19238
+numpy/_core/include/numpy/halffloat.h,sha256=TRZfXgipa-dFppX2uNgkrjrPli-1BfJtadWjAembJ4s,1959
+numpy/_core/include/numpy/ndarrayobject.h,sha256=MnykWmchyS05ler_ZyhFIr_0j6c0IcndEi3X3n0ZWDk,12057
+numpy/_core/include/numpy/ndarraytypes.h,sha256=kS9uirBf_ewXdIgsmRQETk3aQXeSPjLPCa6hlX5By-0,65810
+numpy/_core/include/numpy/npy_2_compat.h,sha256=wdjB7_-AtW3op67Xbj3EVH6apSF7cRG6h3c5hBz-YMs,8546
+numpy/_core/include/numpy/npy_2_complexcompat.h,sha256=eE9dV_Iq3jEfGGJFH_pQjJnvC6eQ12WgOB7cZMmHByE,857
+numpy/_core/include/numpy/npy_3kcompat.h,sha256=grN6W1n7benj3F2pSAOpl_s6vn1Y50QfAP-DaleD7cA,9648
+numpy/_core/include/numpy/npy_common.h,sha256=-05bavbk44KUjy5Q-qnM5YzU32VJRv0N8ozfCI_SKcE,32586
+numpy/_core/include/numpy/npy_cpu.h,sha256=Vw8mVPm1fGmLdeLV3RoBZnBMMXA8cghgwRdWhlkDLi4,4225
+numpy/_core/include/numpy/npy_endian.h,sha256=vvK7ZlOt0vgqTVrIyviWzoxQz70S-BvflS4Z_k6X5XE,2834
+numpy/_core/include/numpy/npy_math.h,sha256=aeSFs60QbWPy1gIPyHDPrYExifm5mbDAcjP_mLk_PF0,18858
+numpy/_core/include/numpy/npy_no_deprecated_api.h,sha256=0yZrJcQEJ6MCHJInQk5TP9_qZ4t7EfBuoLOJ34IlJd4,678
+numpy/_core/include/numpy/npy_os.h,sha256=hlQsg_7-RkvS3s8OM8KXy99xxyJbCm-W1AYVcdnO1cw,1256
+numpy/_core/include/numpy/numpyconfig.h,sha256=FGuDPIr0gTFYgUzhVMXqq5BIQL-WqgmXfp003cUwpWE,7333
+numpy/_core/include/numpy/random/LICENSE.txt,sha256=-8U59H0M-DvGE3gID7hz1cFGMBJsrL_nVANcOSbapew,1018
+numpy/_core/include/numpy/random/bitgen.h,sha256=49AwKOR552r-NkhuSOF1usb_URiMSRMvD22JF5pKIng,488
+numpy/_core/include/numpy/random/distributions.h,sha256=W5tOyETd0m1W0GdaZ5dJP8fKlBtsTpG23V2Zlmrlqpg,9861
+numpy/_core/include/numpy/random/libdivide.h,sha256=ew9MNhPQd1LsCZiWiFmj9IZ7yOnA3HKOXffDeR9X1jw,80138
+numpy/_core/include/numpy/ufuncobject.h,sha256=BengvqXqiy4ipzz23KQi1Kldy9ybYUs4Sp5yA73VgiU,11780
+numpy/_core/include/numpy/utils.h,sha256=wMNomSH3Dfj0q78PrjLVtFtN-FPo7UJ4o0ifCUO-6Es,1185
+numpy/_core/lib/libnpymath.a,sha256=oXeSGrMy3L_zDbnj58as1hihfFFftHWb73ah3KPeCT4,54312
+numpy/_core/lib/npy-pkg-config/mlib.ini,sha256=_LsWV1eStNqwhdiYPa2538GL46dnfVwT4MrI1zbsoFw,147
+numpy/_core/lib/npy-pkg-config/npymath.ini,sha256=0iMzarBfkkZ_EXO95_kz-SHZRcNIEwIeOjE_esVBkRQ,361
+numpy/_core/lib/pkgconfig/numpy.pc,sha256=SBZcZL5NZ_HgJxW6wc5xZJOoOjAxwSTo539d_6v-1tk,191
+numpy/_core/memmap.py,sha256=yIsQ6n9kpZulggRJJFkTbjVwnB4leoyizvUpc2iU4n8,12651
+numpy/_core/memmap.pyi,sha256=_LKjb_PuhcQwpqc2lFaL379DYzQ9PtuKdlVV3jXOYEM,47
+numpy/_core/multiarray.py,sha256=zwHBdyOoxiBRcOhG2QB_xBAYm-p8ARSpQbye9EzrrBo,58155
+numpy/_core/multiarray.pyi,sha256=Uy5Unmczfk7Pyz8Ohgh_5g4ASY7aZ0ZYpmhhmPnG6OA,32150
+numpy/_core/numeric.py,sha256=_DcnvXu6oaHXSi9Q-BV9yGzfx7tc9iCx69r9MnJDm5g,82322
+numpy/_core/numeric.pyi,sha256=ZSWTBi2kdP7BPG3KMGJWJIlqM9BLKFmgq_xgK_GnDUo,19042
+numpy/_core/numerictypes.py,sha256=mKPbsOzX9vyWQEv4jlf4xnlPfP4IYAXeILHFdb2FS0I,15957
+numpy/_core/numerictypes.pyi,sha256=Kp4_fEg_Wj_Yv8xvI7H1TJXrDVsxb96oIH5EmnQyW1c,3270
+numpy/_core/overrides.py,sha256=MtgzOBavG7wzQYCA7O7ArdCJVV72STIb_cvkWBuDLJE,7241
+numpy/_core/overrides.pyi,sha256=2lHte4EbOTDQvknjVfO71RgiLXnOpGQky5j2meS09JU,1713
+numpy/_core/printoptions.py,sha256=NFpvy5bnjbvqnKeqQt0veEExpAAYAVNoiGXH3pglWAc,1056
+numpy/_core/printoptions.pyi,sha256=eNiliCnDuZBxla6X9kwZ-7YiCn-UtMbT-U_qTnw8l9w,594
+numpy/_core/records.py,sha256=hoXCDswM6hbytiGdYGkhRISzQjnqImXcIdGlNuOUDX4,36767
+numpy/_core/records.pyi,sha256=tob9AxABbCXsO--gWXX-pD5Bo50NgCXKOt4JstVESjY,8935
+numpy/_core/shape_base.py,sha256=7yDPrIXTmmBnZMUStHXsq1iJNiGmIxEAcepxQ9o-JVQ,32738
+numpy/_core/shape_base.pyi,sha256=Qgfi1izbvKgRWAojCMXw3HsONgvsryFCsDhAvNI1dZE,4753
+numpy/_core/strings.py,sha256=yjdeNG2e0wpljpnwGISi7NXVLD4ttCM5vAYSSV1yI8k,50642
+numpy/_core/strings.pyi,sha256=Fyjq70ZP70BzV3Ov490dxX5EOv76sgnxA7qVBxeXuRU,13502
+numpy/_core/tests/__pycache__/_locales.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/_natype.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test__exceptions.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_abc.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_api.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_argparse.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_array_api_info.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_array_coercion.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_array_interface.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_arraymethod.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_arrayobject.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_arrayprint.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_casting_unittests.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_conversion_utils.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_cpu_features.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_cython.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_datetime.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_defchararray.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_deprecations.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_dlpack.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_dtype.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_einsum.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_errstate.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_extint128.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_function_base.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_getlimits.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_half.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_hashtable.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_indexerrors.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_indexing.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_item_selection.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_limited_api.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_longdouble.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_machar.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_mem_overlap.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_mem_policy.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_memmap.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_multiarray.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_multithreading.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_nditer.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_numeric.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_numerictypes.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_overrides.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_print.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_protocols.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_records.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_regression.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_scalar_methods.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_scalarinherit.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_scalarmath.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_scalarprint.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_shape_base.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_simd.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_simd_module.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_stringdtype.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_strings.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_ufunc.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_umath.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_umath_complex.cpython-312.pyc,,
+numpy/_core/tests/__pycache__/test_unicode.cpython-312.pyc,,
+numpy/_core/tests/_locales.py,sha256=lvHqUJVMsrE7Jh3N_KpO5fGBZgID-l3Zr4-_RrH1ZNM,2176
+numpy/_core/tests/_natype.py,sha256=YCAkuhvWuMjTjt-C0VjA8zzui-KoioNwOmAYnvf6KR0,6525
+numpy/_core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716
+numpy/_core/tests/data/generate_umath_validation_data.cpp,sha256=BQakB5o8Mq60zex5ovVO0IatNa7xbF8JvXmtk6373So,5842
+numpy/_core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640
+numpy/_core/tests/data/umath-validation-set-README.txt,sha256=pxWwOaGGahaRd-AlAidDfocLyrAiDp0whf5hC7hYwqM,967
+numpy/_core/tests/data/umath-validation-set-arccos.csv,sha256=yBlz8r6RnnAYhdlobzGGo2FKY-DoSTQaP26y8138a3I,61365
+numpy/_core/tests/data/umath-validation-set-arccosh.csv,sha256=0GXe7XG1Z3jXAcK-OlEot_Df3MetDQSlbm3MJ__iMQk,61365
+numpy/_core/tests/data/umath-validation-set-arcsin.csv,sha256=w_Sv2NDn-mLZSAqb56JT2g4bqBzxYAihedWxHuf82uU,61339
+numpy/_core/tests/data/umath-validation-set-arcsinh.csv,sha256=DZrMYoZZZyM1DDyXNUxSlzx6bOgajnRSLWAzxcPck8k,60289
+numpy/_core/tests/data/umath-validation-set-arctan.csv,sha256=0aosXZ-9DYTop0lj4bfcBNwYVvjZdW13hbMRTRRTmV0,60305
+numpy/_core/tests/data/umath-validation-set-arctanh.csv,sha256=HEK9ePx1OkKrXIKkMUV0IxrmsDqIlgKddiI-LvF2J20,61339
+numpy/_core/tests/data/umath-validation-set-cbrt.csv,sha256=v855MTZih-fZp_GuEDst2qaIsxU4a7vlAbeIJy2xKpc,60846
+numpy/_core/tests/data/umath-validation-set-cos.csv,sha256=0PNnDqKkokZ7ERVDgbes8KNZc-ISJrZUlVZc5LkW18E,59122
+numpy/_core/tests/data/umath-validation-set-cosh.csv,sha256=JKC4nKr3wTzA_XNSiQvVUq9zkYy4djvtu2-j4ZZ_7Oc,60869
+numpy/_core/tests/data/umath-validation-set-exp.csv,sha256=rUAWIbvyeKh9rPfp2n0Zq7AKq_nvHpgbgzLjAllhsek,17491
+numpy/_core/tests/data/umath-validation-set-exp2.csv,sha256=djosT-3fTpiN_f_2WOumgMuuKgC_XhpVO-QsUFwI6uU,58624
+numpy/_core/tests/data/umath-validation-set-expm1.csv,sha256=K7jL6N4KQGX71fj5hvYkzcMXk7MmQes8FwrNfyrPpgU,60299
+numpy/_core/tests/data/umath-validation-set-log.csv,sha256=ynzbVbKxFzxWFwxHnxX7Fpm-va09oI3oK1_lTe19g4w,11692
+numpy/_core/tests/data/umath-validation-set-log10.csv,sha256=NOBD-rOWI_FPG4Vmbzu3JtX9UA838f2AaDFA-waiqGA,68922
+numpy/_core/tests/data/umath-validation-set-log1p.csv,sha256=tdbYWPqWIz8BEbIyklynh_tpQJzo970Edd4ek6DsPb8,60303
+numpy/_core/tests/data/umath-validation-set-log2.csv,sha256=39EUD0vFMbwyoXoOhgCmid6NeEAQU7Ff7QFjPsVObIE,68917
+numpy/_core/tests/data/umath-validation-set-sin.csv,sha256=8PUjnQ_YfmxFb42XJrvpvmkeSpEOlEXSmNvIK4VgfAM,58611
+numpy/_core/tests/data/umath-validation-set-sinh.csv,sha256=XOsBUuPcMjiO_pevMalpmd0iRv2gmnh9u7bV9ZLLg8I,60293
+numpy/_core/tests/data/umath-validation-set-tan.csv,sha256=Hv2WUMIscfvQJ5Y5BipuHk4oE4VY6QKbQp_kNRdCqYQ,60299
+numpy/_core/tests/data/umath-validation-set-tanh.csv,sha256=iolZF_MOyWRgYSa-SsD4df5mnyFK18zrICI740SWoTc,60299
+numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc,,
+numpy/_core/tests/examples/cython/checks.pyx,sha256=nw6o0nlj3SfNQP3McS10zVH9UCZiITBdAi5yO4gm9Qo,10774
+numpy/_core/tests/examples/cython/meson.build,sha256=uuXVPKemNVMQ5MiEDqS4BXhwGHa96JHjS50WxZuJS_8,1268
+numpy/_core/tests/examples/cython/setup.py,sha256=JM6UnDql7LsAnRo6p9G-nRz3dfnoy9fHF6YVKy1OzdA,859
+numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc,,
+numpy/_core/tests/examples/limited_api/limited_api1.c,sha256=htSR9ER3S8AJqv4EZMsrxQ-SufTIlXNpuFI6MXQs87w,346
+numpy/_core/tests/examples/limited_api/limited_api2.pyx,sha256=1q4I59pdkCmMhLcYngN_XwQnPoLmDEo1uTGnhrLRjDc,203
+numpy/_core/tests/examples/limited_api/limited_api_latest.c,sha256=ltBLbrl1g9XxD2wvN_-g3NhIizc8mxnh2Z6wCyXo-8E,452
+numpy/_core/tests/examples/limited_api/meson.build,sha256=YM5RwW_waFymlWSHFhCCOHO6KCknooN0jCiqScL0i5M,1627
+numpy/_core/tests/examples/limited_api/setup.py,sha256=Y6tgsOF58qe7eG2QmRQHG2wacZWfpbJLT8u-5OamjqA,437
+numpy/_core/tests/test__exceptions.py,sha256=luMT6vPIdf6LuwFNGyT-xLMZaKZEYYOFzFpMaesojoE,2922
+numpy/_core/tests/test_abc.py,sha256=9y2SsJdkPeV0oW6dsROPZOcQ72_mXie1uU2yPN93wzo,2221
+numpy/_core/tests/test_api.py,sha256=NiqlxYyBOZlKVKIWs_vQTg6ZnOk5iE63nbz1GBdHXeI,22954
+numpy/_core/tests/test_argparse.py,sha256=pfFfRr0grfOt-6Y7D8q9yPmz8Fcx4UbUxLpe96Tk9Xg,2870
+numpy/_core/tests/test_array_api_info.py,sha256=PZ2EzS9pq4nLZRAvvUSOb2Ke5p7pb4u4P4HKLRZjstw,3063
+numpy/_core/tests/test_array_coercion.py,sha256=PJ3s7psngDM084R2x7luAHVkHoa31TDiH1FiZpUWSfs,34897
+numpy/_core/tests/test_array_interface.py,sha256=l39VuV4nCdIeV1RUvMtjjPohAgIvJP-V3GQ5MaPrVK8,7843
+numpy/_core/tests/test_arraymethod.py,sha256=my4I9YjpVGLwN1GMbuoEhBZEJN0PuH6R2wtvGHcfoWI,3223
+numpy/_core/tests/test_arrayobject.py,sha256=aVv2eGjunCMEDFgmFujxMpk4xb-zo1MQrFcwQLfblx0,2596
+numpy/_core/tests/test_arrayprint.py,sha256=6UmL93wltbIDKdhF_WcdPRH5mztX0wyzuBy6PYW3R_o,50738
+numpy/_core/tests/test_casting_floatingpoint_errors.py,sha256=cER1YCNEwq67uAPX0QhkJonb5oA4Ws1_t0Z2AWJjYJg,5076
+numpy/_core/tests/test_casting_unittests.py,sha256=HH849h4ox1dejLB4aFX2B9tSGf0WhVvPZBPJT4yTOAA,34336
+numpy/_core/tests/test_conversion_utils.py,sha256=HAIdSRUit1lhSQEn-UVPTwyNxKjP9bSr8NGeHXnp6ew,6362
+numpy/_core/tests/test_cpu_dispatcher.py,sha256=26vob-nCPkjtxf9lRlQvwoTR92lqquyDGPgE5DIoii8,1570
+numpy/_core/tests/test_cpu_features.py,sha256=lS9iIWWznKZgR8-G4ABZqznMTJGC343-FBaCG9ZHXmQ,15703
+numpy/_core/tests/test_custom_dtypes.py,sha256=LZCbBeoyCcluhz_drg5neyiAsoTaK-6DjB4l3LaNnTw,11766
+numpy/_core/tests/test_cython.py,sha256=hLdTcd5wbzMXOx_OyQEzNyFWm-rIcWto7LpCl1SNdIU,10186
+numpy/_core/tests/test_datetime.py,sha256=gbArTFwyvmbQSkvTwa7oCv6UXDuvYV3_AbFEvK4ImOo,122685
+numpy/_core/tests/test_defchararray.py,sha256=hmMd5Wv5PjTEIuBXq_DopSqJsnp-qJ8ub5BBGRKIUEw,30629
+numpy/_core/tests/test_deprecations.py,sha256=CayfNUVMMj4BYTIFdYR4xvL2Sy2CTLN7VTABe0HIlxg,17101
+numpy/_core/tests/test_dlpack.py,sha256=Lfi3Xd2umxJ4W8fJht5epHlYWwTKx7MB47i7dcOIpq8,5830
+numpy/_core/tests/test_dtype.py,sha256=e1ZLn0xj8FrlxK3FeHOOsoQ-xV17-FMM7mh7VpuuVhs,78797
+numpy/_core/tests/test_einsum.py,sha256=Sixz-ZogKZmnFz3t49voD6AsCxmxUl_c_DHxT9rdscE,56277
+numpy/_core/tests/test_errstate.py,sha256=czhSWJJ8mdDpkh76pAxU2-d4ebMyopyk2D_CC-2lzI0,4627
+numpy/_core/tests/test_extint128.py,sha256=F6TAH3PlGON3CNz-B4hunClNUTQYQ2R8CkvaX2Zqeo4,5625
+numpy/_core/tests/test_function_base.py,sha256=x6rHdbqXtHj07Oml_5DslnG6y8jm0XfW4RdV0Q_lHHA,17651
+numpy/_core/tests/test_getlimits.py,sha256=CAHTLA8QIYVXTLWCGAISUZaAJ-xd_cBnSdYaOGuLWn8,6976
+numpy/_core/tests/test_half.py,sha256=QSKuHAfa8NWvl0A51-XcV0UOIvk-ooLy6pndq90hr6k,24425
+numpy/_core/tests/test_hashtable.py,sha256=m9-IRALLhU5liPuAk4v-ZQTVQ4s5XtLhL6xRXf5QTOE,1147
+numpy/_core/tests/test_indexerrors.py,sha256=mU2MJbdpbrcvxLZqZR293So4ZJxMH4apAjqXufRyOis,4726
+numpy/_core/tests/test_indexing.py,sha256=lU0jP4UvEe2_MUiAhy4_GD1zvpdIwUrHviu0MJhW_wQ,55421
+numpy/_core/tests/test_item_selection.py,sha256=AoPUe3llYwKjv3dO1PW1qSml4SWrAAL3fNqpwKAku6w,6631
+numpy/_core/tests/test_limited_api.py,sha256=75nz_t-jBdjKim6j-WW7WsD2rPnJ_KQ-zrRUiP3nVic,3463
+numpy/_core/tests/test_longdouble.py,sha256=FjuntHkYe158dwWr7eYe_mlqkj7sQ9lQXKZ93CKF0Pc,12391
+numpy/_core/tests/test_machar.py,sha256=Aw8icmrolAGmbIuXhUIYd4YvqIRR1I8GkcSx0J2c6yM,1067
+numpy/_core/tests/test_mem_overlap.py,sha256=IGpRF2GnkLQxEiIizsVT0eWUtlgCcJQ4w0-BEjSpT_8,29219
+numpy/_core/tests/test_mem_policy.py,sha256=pL6kBK8fgtRDTfMubFGGWnliTPWnS64uZ9l1H5qI8hk,16794
+numpy/_core/tests/test_memmap.py,sha256=LtghbNqt9AOmAalIyZF3lepthcKircyNfb2-5_Tkj1c,8186
+numpy/_core/tests/test_multiarray.py,sha256=au2BIcxXH1rXMVBm4VKNA3aogJu3Qtd8bAwcoZzpDcM,400390
+numpy/_core/tests/test_multithreading.py,sha256=VkvO2311ch8a_EeF7RTmhAQWvtHXuTZhqLVZZH1ovKI,8601
+numpy/_core/tests/test_nditer.py,sha256=7y1wdYzpGdwEbHRc5xppx8FZ45cKxNrm3JKzUPvkhrE,136568
+numpy/_core/tests/test_nep50_promotions.py,sha256=i6KpABBWFB5PWCdEv8kIjNQd7ryAPINS5m_Tnu7sDj4,10068
+numpy/_core/tests/test_numeric.py,sha256=aM2TfTaSVE2fz0Z3nN72XoxSDvZzAdatwWpLYWGBBws,159748
+numpy/_core/tests/test_numerictypes.py,sha256=r4ZvEN0E8efuqZhx2spCXA5Mr14mK1BRpmOZFRp0LhU,23271
+numpy/_core/tests/test_overrides.py,sha256=0sDSmDWIr88GuCj0gOxdE3l0X_T5Hb5Wj2zfJDkOtvU,27518
+numpy/_core/tests/test_print.py,sha256=_cuM-DIpljOkzErb2ggIgs9HvOYrtpRppaECF6xAo0c,6787
+numpy/_core/tests/test_protocols.py,sha256=pbfumoRNnPhDP6PAPNIgLHUPPlmCdamCo4akkO8afjo,1173
+numpy/_core/tests/test_records.py,sha256=PAMHzIPp2WWDm4JHFQ-cjPBWf4BDuQumIYo7UX-zElk,20547
+numpy/_core/tests/test_regression.py,sha256=fJJnesLRUyPziCbYVM9LfLSS3qAMUz1-mzddhV9Br-U,95565
+numpy/_core/tests/test_scalar_ctors.py,sha256=I3akKp6WdwsTGic8pYQC_c6AxPXPEXStywWOF0n_ivU,6724
+numpy/_core/tests/test_scalar_methods.py,sha256=tx1RoZ03QsWblqg3Dv_JkaBFUOOILKZIqaEsFEs4tfE,9117
+numpy/_core/tests/test_scalarbuffer.py,sha256=2mZblaScwhN8mdlQvUULAKt273B2ia-mjtNmL_2UxfQ,5638
+numpy/_core/tests/test_scalarinherit.py,sha256=OIvSjrltdNSSP2c5HvDQ6pza3aKfmfgtixu1Zbahpcg,2587
+numpy/_core/tests/test_scalarmath.py,sha256=gBHBZ5SQMru1A57FUEaIMk19GFdVLTRXiO9vVh4XVVc,46583
+numpy/_core/tests/test_scalarprint.py,sha256=NS-FQDWICDcuDF5gxTQuG1Td1-EiOXIXufI-dwvKwxU,19705
+numpy/_core/tests/test_shape_base.py,sha256=mRSruY7S84ula25ZoOvbcRg_ea_3C3338e1tmdmv1Uk,31536
+numpy/_core/tests/test_simd.py,sha256=u8xSZ6HNLJ9-siYNIuyd0RA7FbD1BLEmnV5TGUrt1FU,48823
+numpy/_core/tests/test_simd_module.py,sha256=JjXH4Yq-0K-R8FHqVDinNaqY_grb1fQFFyVTHGQ0pBg,3904
+numpy/_core/tests/test_stringdtype.py,sha256=NwBb0NsnnnFmjfWAemc_FEHTq_ArJ48mNj33AD_zOYM,57072
+numpy/_core/tests/test_strings.py,sha256=16hEUxlHI89-8YsoW9RfI-V4eU-GKwnJXEak-dB7lW8,57959
+numpy/_core/tests/test_ufunc.py,sha256=yO1DbSTyonZWsz8HoXV0E4YN5Xlg-aIHi6xn2gTi928,136356
+numpy/_core/tests/test_umath.py,sha256=D7wSX7JvIk80znwd8GsxYZIzp62It75SBzvKOZHeOXE,193840
+numpy/_core/tests/test_umath_accuracy.py,sha256=QCFAeiPN6rEO8fwDwJun4J1pCKm0bPsQK6-1pTYCMIY,5478
+numpy/_core/tests/test_umath_complex.py,sha256=LZMd-divBHQQ7dS34obwvmStXa8aNez45VIVTwPg_jM,23627
+numpy/_core/tests/test_unicode.py,sha256=qrQ7UC0yndXFYI7MiJu8y_I5jCK2lxOQcehE289MElk,12967
+numpy/_core/umath.py,sha256=t_SQIHR7dkMF-VRp8dKyroOEd90oqNlzmgGwaH28qW8,2130
+numpy/_core/umath.pyi,sha256=FIqmlQwQIueIrs-_QehV3guNEnJE2LxVs3NPCj38Vdo,2643
+numpy/_distributor_init.py,sha256=FBSJdgVHlQca5BrQEVYPoFm6KSTJhIFnWtWbEkEhTSo,421
+numpy/_distributor_init.pyi,sha256=6IvMzAmr0-Z6oqTkZcgXgrkJrQXVMjBih2AZvLdDgOE,27
+numpy/_expired_attrs_2_0.py,sha256=zP31EXmbwygcOEzyetDEp-RxL9cUfbUUht956zaOSf8,3826
+numpy/_expired_attrs_2_0.pyi,sha256=n2ipDUFTFS4puCD56dlNWGkVkw_b0M6cEyugo4Qh3HM,1253
+numpy/_globals.py,sha256=k5ZVnzUbKNSLPmZ0URYwJN5C_7xIzfMNaaSsBSrPTuI,3091
+numpy/_globals.pyi,sha256=IrHHIXmibXzgK0VUlECQLw4IEkveXSHo_ZWnTkfnLe4,280
+numpy/_pyinstaller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/_pyinstaller/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc,,
+numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc,,
+numpy/_pyinstaller/hook-numpy.py,sha256=MU22pQ4AkUYPQWu5C8pRDpnYXElLJ8R0FGNYJUQpiVE,1362
+numpy/_pyinstaller/hook-numpy.pyi,sha256=tAvtMPovoi-sur0D1NAo3_evSmYKLTh0bgRSC7QrCIk,349
+numpy/_pyinstaller/tests/__init__.py,sha256=pdPbCTRwpCJamlyvIi9HZTlqAvK5HPbGu3oMA0cu2Rs,329
+numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc,,
+numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc,,
+numpy/_pyinstaller/tests/pyinstaller-smoke.py,sha256=6iL-eHMQaG3rxnS5EgcvrCqElm9aKL07Cjr1FZJSXls,1143
+numpy/_pyinstaller/tests/test_pyinstaller.py,sha256=8K-7QxmfoXCG0NwR0bhIgCNrDjGlrTzWnrR1sR8btgU,1135
+numpy/_pytesttester.py,sha256=DjlYL8uINN2XWa3nnlX6gPGuoLjcx1Bie_PQzbp2cpA,6328
+numpy/_pytesttester.pyi,sha256=VXCuwPYTb9-PF6nxXwibwBbre0hW9jIB4nkzmtm2kls,497
+numpy/_typing/__init__.py,sha256=MG5Wv9dc3ZyOmDfidH5cFtykeyNM77ArC4R3UW7Tn-Y,7188
+numpy/_typing/__pycache__/__init__.cpython-312.pyc,,
+numpy/_typing/__pycache__/_add_docstring.cpython-312.pyc,,
+numpy/_typing/__pycache__/_array_like.cpython-312.pyc,,
+numpy/_typing/__pycache__/_char_codes.cpython-312.pyc,,
+numpy/_typing/__pycache__/_dtype_like.cpython-312.pyc,,
+numpy/_typing/__pycache__/_extended_precision.cpython-312.pyc,,
+numpy/_typing/__pycache__/_nbit.cpython-312.pyc,,
+numpy/_typing/__pycache__/_nbit_base.cpython-312.pyc,,
+numpy/_typing/__pycache__/_nested_sequence.cpython-312.pyc,,
+numpy/_typing/__pycache__/_scalars.cpython-312.pyc,,
+numpy/_typing/__pycache__/_shape.cpython-312.pyc,,
+numpy/_typing/__pycache__/_ufunc.cpython-312.pyc,,
+numpy/_typing/_add_docstring.py,sha256=_3g7D-6HAQ3MT4X6DE07yLua9LqWFhskNVx1TS7X9O4,3999
+numpy/_typing/_array_like.py,sha256=EPZUfJSjamvsWJ6Rs5ZwwA_5FhBpYdoifcVVtVcWPn0,4188
+numpy/_typing/_callable.pyi,sha256=_nn_VLm2TgIoGk4BIbZBpgubwoJCiDjIOFTz0WkxjXg,9139
+numpy/_typing/_char_codes.py,sha256=j07npk82Nb7Ira2z7ZTlU3UcOPwt2gM7qZKrPLdjT48,8764
+numpy/_typing/_dtype_like.py,sha256=8M5RekLqdheEjWMIn4RnbkEzsS7jCatCiT0D5hg-53c,3762
+numpy/_typing/_extended_precision.py,sha256=pknUqgak0FBNM-sERPqW-pFGH71_K-iehFSee5oQiqE,434
+numpy/_typing/_nbit.py,sha256=KSbKwOKttob-5ytT5vCVkHrDMn0YHvyptTTyj_6AYcw,632
+numpy/_typing/_nbit_base.py,sha256=nPZpsQltuR5B0iaAYF9qD2he_kXnmssv_RhaUNFsW-s,3058
+numpy/_typing/_nbit_base.pyi,sha256=kHAqTmpYUWbQyTUVRs4NKKcDwiEJgUzWvvT1FQgQ89I,740
+numpy/_typing/_nested_sequence.py,sha256=so1agYGHd5gDo_IBvvHqBB5lsqGbHqN_imyC5UHU-HI,2505
+numpy/_typing/_scalars.py,sha256=LhXY2BTHmeYKzeIZfpjvuMn-5eOLjU2n9z7z1l5bKf8,944
+numpy/_typing/_shape.py,sha256=6cFv-LbSyG9mlfSBOGGyul9Q_GUrlcHQC9JZa-m20cA,275
+numpy/_typing/_ufunc.py,sha256=HOkaE-6wV0fd3rmHZGC39YAHIIf8tyvlzekD4y4GQxA,156
+numpy/_typing/_ufunc.pyi,sha256=1Ni26dsi2fbH2oNvXDNNXaBPQQzdhkwA7VQ8eyuJS_c,26575
+numpy/_utils/__init__.py,sha256=hVnZ7C0MCSNbMw-Zyq-MKCYStaGX6RzqFMnnh7ed4dE,3477
+numpy/_utils/__init__.pyi,sha256=VxEygNvp90alV8zYsUSuDYNdF7BEucXUx3w55Ef7YXI,726
+numpy/_utils/__pycache__/__init__.cpython-312.pyc,,
+numpy/_utils/__pycache__/_convertions.cpython-312.pyc,,
+numpy/_utils/__pycache__/_inspect.cpython-312.pyc,,
+numpy/_utils/__pycache__/_pep440.cpython-312.pyc,,
+numpy/_utils/_convertions.py,sha256=0xMxdeLOziDmHsRM_8luEh4S-kQdMoMg6GxNDDas69k,329
+numpy/_utils/_convertions.pyi,sha256=4l-0UmPCyVA70UJ8WAd2A45HrKFSzgC0sFDBSnKcYiQ,118
+numpy/_utils/_inspect.py,sha256=zFuJABH08D1Kgq_eecYkD1Ogg0OXp1t4oqjZxM0kdLk,7436
+numpy/_utils/_inspect.pyi,sha256=wFajmQpCTXpMbJBbdiiyJMb29HkaMW0jEWLMqbQcQ5k,2255
+numpy/_utils/_pep440.py,sha256=it9P4_oHXWw3BxdoVz7JPMuj5kxF5M7_BJ8Z1m9nu0w,13988
+numpy/_utils/_pep440.pyi,sha256=xzYJoZ6DnjvgaKr8OsBwim77fAJ0xeQJI9XAt75gvfI,3870
+numpy/char/__init__.py,sha256=xs6pprMdmNeXVfuTRkU3nF9qdhutWdPu5oaep2AjWmc,93
+numpy/char/__init__.pyi,sha256=siwqDh7X7u4e0HGx3xg8eDaJVqy0_nac5y8UMzz-BcM,1540
+numpy/char/__pycache__/__init__.cpython-312.pyc,,
+numpy/conftest.py,sha256=pXdv-CKocoIEpr0DsYstu7TgqvNdzSvfiDNMlMwmqYk,8577
+numpy/core/__init__.py,sha256=wJNaRF1UFOnZKqiBrsshWLjTGiEZ9rvWlcit0xj7Y0w,1290
+numpy/core/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/core/__pycache__/__init__.cpython-312.pyc,,
+numpy/core/__pycache__/_dtype.cpython-312.pyc,,
+numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc,,
+numpy/core/__pycache__/_internal.cpython-312.pyc,,
+numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc,,
+numpy/core/__pycache__/_utils.cpython-312.pyc,,
+numpy/core/__pycache__/arrayprint.cpython-312.pyc,,
+numpy/core/__pycache__/defchararray.cpython-312.pyc,,
+numpy/core/__pycache__/einsumfunc.cpython-312.pyc,,
+numpy/core/__pycache__/fromnumeric.cpython-312.pyc,,
+numpy/core/__pycache__/function_base.cpython-312.pyc,,
+numpy/core/__pycache__/getlimits.cpython-312.pyc,,
+numpy/core/__pycache__/multiarray.cpython-312.pyc,,
+numpy/core/__pycache__/numeric.cpython-312.pyc,,
+numpy/core/__pycache__/numerictypes.cpython-312.pyc,,
+numpy/core/__pycache__/overrides.cpython-312.pyc,,
+numpy/core/__pycache__/records.cpython-312.pyc,,
+numpy/core/__pycache__/shape_base.cpython-312.pyc,,
+numpy/core/__pycache__/umath.cpython-312.pyc,,
+numpy/core/_dtype.py,sha256=GHBhfVtsVrP7v13IujEz9aGIENkYIdbfuRu-New1UnU,323
+numpy/core/_dtype.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/core/_dtype_ctypes.py,sha256=wX4m37b0zQgxlzT5OjE_uj2E5CpiX9E7HLFpO6h_lDY,351
+numpy/core/_dtype_ctypes.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/core/_internal.py,sha256=qxpHJELXNUcYJkJt1LktQuZm4BwYu4bXnMuBEOp6POU,949
+numpy/core/_multiarray_umath.py,sha256=T88HZgFD5VCuXRCSeLbPoj99nKUSdgyw8xWyf6eqhxQ,2098
+numpy/core/_utils.py,sha256=5fk18JN43Rg6YHvan6QjdrOeOuLtRlLVmP6MadBEJVA,923
+numpy/core/arrayprint.py,sha256=Lbe4smWXYFzd9sO9LLJ5PZS4C3bSvLt6HRtwSE56xN8,339
+numpy/core/defchararray.py,sha256=a9luvvni8gRrGVdKO7U_xwsFFvkzlxnVgxL75jLRmCI,347
+numpy/core/einsumfunc.py,sha256=CNucINgUIrpiLQn4xPI_mogwjfKlFA3h7gwAvRVwb5M,339
+numpy/core/fromnumeric.py,sha256=5TaonJVuC110qv3f3cqTtmjayTX0BmqJAgoAJn5H3ZI,343
+numpy/core/function_base.py,sha256=vhjhzsEzDd11RHg6pilfMJO3X6k94an5RAJqj-nlzms,351
+numpy/core/getlimits.py,sha256=6nCk4Tw0LjW7joWsprI5LiMzje1gsOjO2lSQ_OwBB8I,335
+numpy/core/multiarray.py,sha256=bjdPLbvJuj61M6TZkbB5NXOCNmH4QbUq6g3ePkKP6TA,793
+numpy/core/numeric.py,sha256=Ctk_QikyB2mM0xI0lBeB8YTUfTwQSXfVdpIMRtunbMo,360
+numpy/core/numerictypes.py,sha256=bXwTwzUahzbHrFGhS5RkJOvb6TYEsQnQC5ww9mN-1Vw,347
+numpy/core/overrides.py,sha256=1FZyb0U6JJuyojtxFvQ7HSJ2rpfhWec0F-X0mapCjc8,335
+numpy/core/overrides.pyi,sha256=-3xfjHfa4UaCuhTVwwRN4EOM5uz9vZR0gMeTVvEdbYI,525
+numpy/core/records.py,sha256=9yfFDxyOc68lXqfbaosgRNlw1dbWP8CRHzIPEtEtSgc,327
+numpy/core/shape_base.py,sha256=2srdQtF1d8LpUbDjGMXT-Tqz2K2NaTO-ZEC4viCYswY,339
+numpy/core/umath.py,sha256=hMVmNrICdqXRiiRG7UMV0Gr-9xYqJGmkONGQn20iK98,319
+numpy/ctypeslib/__init__.py,sha256=WFwMhpV2LJP-IQOspaInhV8c6XPKZwqppE-cvtIpqvU,193
+numpy/ctypeslib/__init__.pyi,sha256=R0tHAk1P0jw-HLYjjKBqXEjDyXhByrtbjrgOxht9tE4,619
+numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc,,
+numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc,,
+numpy/ctypeslib/_ctypeslib.py,sha256=NtEUpisQhDfETBLAkqYf7Ajq0xiNhZurb5SmGGH54pA,19079
+numpy/ctypeslib/_ctypeslib.pyi,sha256=xS-NLEO6xwjUr-AUWfGxz3N7X5jwIGBVl6RhOUUYZ74,8084
+numpy/doc/__pycache__/ufuncs.cpython-312.pyc,,
+numpy/doc/ufuncs.py,sha256=9xt8H34GhrXrFq9cWFUGvJFePa9YuH9Tq1DzAnm2E2E,5414
+numpy/dtypes.py,sha256=zuPwgC0ijF2oDRAOJ6I9JKhaJuhXFAygByLQaoVtT54,1312
+numpy/dtypes.pyi,sha256=sNN4kzUfhArHuKaMRKofBNZ57trl35UaZ51oDWrMmJ4,15544
+numpy/exceptions.py,sha256=x1z7C2RjrDFW8tLewbZjyMiQok0WBm5kKuRPIxVLUjg,7800
+numpy/exceptions.pyi,sha256=MJbCHjwFGps97WaVOPkaoUb8wi-l5OUbcFHdWZgBGbI,751
+numpy/f2py/__init__.py,sha256=cAgUHWgJQZZsfv8co8KBNr_m8B6fpzdBaUNvJeBf_No,2448
+numpy/f2py/__init__.pyi,sha256=UbgqGZKYnDHGHX9MlwBB3aBZ2T470ojrNREIhkwt6gc,132
+numpy/f2py/__main__.py,sha256=6i2jVH2fPriV1aocTY_dUFvWK18qa-zjpnISA-OpF3w,130
+numpy/f2py/__pycache__/__init__.cpython-312.pyc,,
+numpy/f2py/__pycache__/__main__.cpython-312.pyc,,
+numpy/f2py/__pycache__/__version__.cpython-312.pyc,,
+numpy/f2py/__pycache__/_isocbind.cpython-312.pyc,,
+numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc,,
+numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc,,
+numpy/f2py/__pycache__/capi_maps.cpython-312.pyc,,
+numpy/f2py/__pycache__/cb_rules.cpython-312.pyc,,
+numpy/f2py/__pycache__/cfuncs.cpython-312.pyc,,
+numpy/f2py/__pycache__/common_rules.cpython-312.pyc,,
+numpy/f2py/__pycache__/crackfortran.cpython-312.pyc,,
+numpy/f2py/__pycache__/diagnose.cpython-312.pyc,,
+numpy/f2py/__pycache__/f2py2e.cpython-312.pyc,,
+numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc,,
+numpy/f2py/__pycache__/func2subr.cpython-312.pyc,,
+numpy/f2py/__pycache__/rules.cpython-312.pyc,,
+numpy/f2py/__pycache__/symbolic.cpython-312.pyc,,
+numpy/f2py/__pycache__/use_rules.cpython-312.pyc,,
+numpy/f2py/__version__.py,sha256=99S6mSevuhwGmO9ku--7VUJekhN0ot4-J0cZKiHcqpw,48
+numpy/f2py/__version__.pyi,sha256=L4V6f6B-wuPi82B0MzeQsgN0NuHUQs9rKYl1jy3tG7s,45
+numpy/f2py/_backends/__init__.py,sha256=7_bA7c_xDpLc4_8vPfH32-Lxn9fcUTgjQ25srdvwvAM,299
+numpy/f2py/_backends/__init__.pyi,sha256=i4XhDRwbrl0ta6QGJPxhYGfSgugNGdtoWf1_27eSd60,136
+numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc,,
+numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc,,
+numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc,,
+numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc,,
+numpy/f2py/_backends/_backend.py,sha256=oFXZ8-VwcQSbltl8_pgWLPqCOZ8Y_px7oeTk_BlxJTc,1151
+numpy/f2py/_backends/_backend.pyi,sha256=sU4YiHvGfMkzDFbhZqqQPT-kwJZsWpGemkLxDion7ss,1342
+numpy/f2py/_backends/_distutils.py,sha256=hET0WB4qy-D4BznekGAWhk945k5weq2lGUDR6hriXMo,2385
+numpy/f2py/_backends/_distutils.pyi,sha256=-L8K1KQShPGGd1vgr4DlnYf6AshHFaRzAcgGqKv205g,463
+numpy/f2py/_backends/_meson.py,sha256=VouUQkWRUk74WhDtkf6HR79QoK-Wrx8E7qO7gVpyDnk,8107
+numpy/f2py/_backends/_meson.pyi,sha256=wvYtBdippKeiSeLzaYKehql0_3ThS8T8Aqat03hhjQ4,1869
+numpy/f2py/_backends/meson.build.template,sha256=hQeTapAY0xtni5Li-QaEtWx9DH9WDKah2lcEuSZfLLo,1599
+numpy/f2py/_isocbind.py,sha256=zaBgpfPNRmxVG3doUIlbZIiyB990MsXiwDabrSj9HnQ,2360
+numpy/f2py/_isocbind.pyi,sha256=KuzqHJQk0YSQnRnb8xqnyh8T0DGNnDD6bNI880tadCY,339
+numpy/f2py/_src_pyf.py,sha256=PHpo9D28Kq3q_3-KFX8D3sFD9eX8A1c3LuLNzXzByOw,7695
+numpy/f2py/_src_pyf.pyi,sha256=9NKnovhbLibbQkjCrRnyiTPDw3MBqycOHl1--BNrIqw,1012
+numpy/f2py/auxfuncs.py,sha256=dnaUwrdAv4-LbEiHNbS1vrjQNCO0lBuyWkj3Rt_UizE,26920
+numpy/f2py/auxfuncs.pyi,sha256=7RUoWWaHrqSYEmdNd5zCNnmbjUYE5pCe0FCxMXejbhg,8011
+numpy/f2py/capi_maps.py,sha256=7C-NndI2UbStNGXbhgbWOmr9tLAxfQvw1zf7Z7w5SFk,30079
+numpy/f2py/capi_maps.pyi,sha256=pR0pVZhUxaCpctq7FOWFSAGI_gaLdE-NWAyT96cWWZg,1066
+numpy/f2py/cb_rules.py,sha256=6KbPu9yfJ-7pAa24Ij9H34Ll15Qc8CXTqCFiUJI6R8Y,25051
+numpy/f2py/cb_rules.pyi,sha256=X_it8-Q0188EDlXd-QxhRdc3OUoA2t6V_jgM5TiQC88,495
+numpy/f2py/cfuncs.py,sha256=4J4P12oGpyWZHb1AVKAl7YJ3QUgngwGMCnB1IhrJn7U,52660
+numpy/f2py/cfuncs.pyi,sha256=EiAtSQxw4x-UlxsGKIEOJnld1d7dNYrk0bt_rlqLSp0,802
+numpy/f2py/common_rules.py,sha256=_9yzIolJMGgpd3D94LdBsODnfUskMRgt2v03rECIHJQ,5030
+numpy/f2py/common_rules.pyi,sha256=1uzTkcwiin6dVBbWUiOVB1ZppjKBHoRHG_Byvw-1UbI,323
+numpy/f2py/crackfortran.py,sha256=vbAvWj6XszLS-nU0nOedaNNtwtqvkkM8gqZAP9MvPBI,146879
+numpy/f2py/crackfortran.pyi,sha256=AvV_KPeE9jLG9EdmPdb2u7-gPJXc1H2yWVmmihHzCgM,10276
+numpy/f2py/diagnose.py,sha256=YWNj1vM68e47Lb270wlZk5yrcU-yTlzGaYNPBZ7nTAU,5075
+numpy/f2py/diagnose.pyi,sha256=ZFVCWTwf_xzL736p9FcfCYWftOXcNqSMCmq-K27KNN8,23
+numpy/f2py/f2py2e.py,sha256=krSW4RpZPDHNX2IWLdn28KWzj0lzFNSc_6fScbGQMfI,28763
+numpy/f2py/f2py2e.pyi,sha256=Qt6ZeOYBugJLFpAY3F9K_4hcm0sZt_3APTtdKLKObWA,2153
+numpy/f2py/f90mod_rules.py,sha256=7Z5vorU4whX405xML66hr4i1icCUc9gr6an4R-AMh7M,9810
+numpy/f2py/f90mod_rules.pyi,sha256=r6w0DuH2Jdt8wPdDYAnXZAQmytIYUqPOxVz-QaWwt74,451
+numpy/f2py/func2subr.py,sha256=9igCMMDttIgF1MG6kBOagkjI_SF-UlGjACAj3Ncv0-o,10049
+numpy/f2py/func2subr.pyi,sha256=-MDbOrhanuizf3rlcwBQooCF4GnoGprA8ypeFV_m8d0,386
+numpy/f2py/rules.py,sha256=Irj-13oLGowNHYElFV-TZUs0VEd0NQpRsnomnI1NTx8,63091
+numpy/f2py/rules.pyi,sha256=9GfFmNA8Unlg3pxcGwqwFl7yeKyIcTmx7wiPuiBAT-k,1326
+numpy/f2py/setup.cfg,sha256=Fpn4sjqTl5OT5sp8haqKIRnUcTPZNM6MIvUJBU7BIhg,48
+numpy/f2py/src/fortranobject.c,sha256=kLiHOty8fUruzfOmL5MQeVNFJSGHBjn7W6QbPYgQb30,46356
+numpy/f2py/src/fortranobject.h,sha256=7cfRN_tToAQ1Na13VQ2Kzb2ujMHUAgGsbScnfLVOHqs,5823
+numpy/f2py/symbolic.py,sha256=UuFs411WYSqR7JfbsuyNv__IC9wKqxQAWoWRDeKPcdw,53214
+numpy/f2py/symbolic.pyi,sha256=piZrats8SXrOD1qEADo-mbsc5NZOIaZ27Fl3d3cydTc,6083
+numpy/f2py/tests/__init__.py,sha256=pdPbCTRwpCJamlyvIi9HZTlqAvK5HPbGu3oMA0cu2Rs,329
+numpy/f2py/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_block_docstring.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_callback.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_character.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_common.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_crackfortran.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_data.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_docs.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_f2cmap.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_f2py2e.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_isoc.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_kind.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_mixed.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_modules.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_parameter.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_pyf_src.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_quoted_character.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_regression.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_return_character.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_return_complex.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_return_integer.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_return_logical.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_return_real.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_routines.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_size.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_string.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_symbolic.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-312.pyc,,
+numpy/f2py/tests/__pycache__/util.cpython-312.pyc,,
+numpy/f2py/tests/src/abstract_interface/foo.f90,sha256=JFU2w98cB_XNwfrqNtI0yDTmpEdxYO_UEl2pgI_rnt8,658
+numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90,sha256=gvQJIzNtvacWE0dhysxn30-iUeI65Hpq7DiE9oRauz8,105
+numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=s6XLwujiCr6Xi8yBkvLPBXRmo2WsGVohU7K9ALnKUng,7478
+numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=But9r9m4iL7EGq_haMW8IiQ4VivH0TgUozxX4pPvdpE,29
+numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=oBwbGSlbr9MkFyhVO2aldjc01dr9GHrMrSiRQek8U64,460
+numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=rfzw3QdI-eaDSl-hslCgGpd5tHftJOVhXvb21Y9Gf6M,499
+numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=rmT9k4jP9Ru1PLcGqepw9Jc6P9XNXM0axY7o4hi9lUw,269
+numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=r08JeTVmTTExA-hYZ6HzaxVwBn1GMbPAuuwBhBDtJUk,130
+numpy/f2py/tests/src/block_docstring/foo.f,sha256=y7lPCPu7_Fhs_Tf2hfdpDQo1bhtvNSKRaZAOpM_l3dg,97
+numpy/f2py/tests/src/callback/foo.f,sha256=C1hjfpRCQWiOVVzIHqnsYcnLrqQcixrnHCn8hd9GhVk,1254
+numpy/f2py/tests/src/callback/gh17797.f90,sha256=_Nrl0a2HgUbtymGU0twaJ--7rMa1Uco2A3swbWvHoMo,148
+numpy/f2py/tests/src/callback/gh18335.f90,sha256=NraOyKIXyvv_Y-3xGnmTjtNjW2Znsnlk8AViI8zfovc,506
+numpy/f2py/tests/src/callback/gh25211.f,sha256=a2sxlQhtDVbYn8KOKHUYqwc-aCFt7sDPSnJsXFG35uI,179
+numpy/f2py/tests/src/callback/gh25211.pyf,sha256=FWxo0JWQlw519BpZV8PoYeI_FZ_K6C-3Wk6gLrfBPlw,447
+numpy/f2py/tests/src/callback/gh26681.f90,sha256=-cD69x7omk5wvVsfMHlXiZ-pTcaxs2Bl5G9GHA4UJ2M,566
+numpy/f2py/tests/src/cli/gh_22819.pyf,sha256=5rvOfCv-wSosB354LC9pExJmMoSHnbGZGl_rtA2fogA,142
+numpy/f2py/tests/src/cli/hi77.f,sha256=ttyI6vAP3qLnDqy82V04XmoqrXNM6uhMvvLri2p0dq0,71
+numpy/f2py/tests/src/cli/hiworld.f90,sha256=QWOLPrTxYQu1yrEtyQMbM0fE9M2RmXe7c185KnD5x3o,51
+numpy/f2py/tests/src/common/block.f,sha256=GQ0Pd-VMX3H3a-__f2SuosSdwNXHpBqoGnQDjf8aG9g,224
+numpy/f2py/tests/src/common/gh19161.f90,sha256=BUejyhqpNVfHZHQ-QC7o7ZSo7lQ6YHyX08lSmQqs6YM,193
+numpy/f2py/tests/src/crackfortran/accesstype.f90,sha256=-5Din7YlY1TU7tUHD2p-_DSTxGBpDsWYNeT9WOwGhno,208
+numpy/f2py/tests/src/crackfortran/common_with_division.f,sha256=2LfRa26JEB07_ti-WDmIveq991PxRlL_K6ss28rZDkk,494
+numpy/f2py/tests/src/crackfortran/data_common.f,sha256=ZSUAh3uhn9CCF-cYqK5TNmosBGPfsuHBIEfudgysun4,193
+numpy/f2py/tests/src/crackfortran/data_multiplier.f,sha256=jYrJKZWF_59JF9EMOSALUjn0UupWvp1teuGpcL5s1Sc,197
+numpy/f2py/tests/src/crackfortran/data_stmts.f90,sha256=19YO7OGj0IksyBlmMLZGRBQLjoE3erfkR4tFvhznvvE,693
+numpy/f2py/tests/src/crackfortran/data_with_comments.f,sha256=hoyXw330VHh8duMVmAQZjr1lgLVF4zFCIuEaUIrupv0,175
+numpy/f2py/tests/src/crackfortran/foo_deps.f90,sha256=CaH7mnWTG7FcnJe2vXN_0zDbMadw6NCqK-JJ2HmDjK8,128
+numpy/f2py/tests/src/crackfortran/gh15035.f,sha256=jJly1AzF5L9VxbVQ0vr-sf4LaUo4eQzJguhuemFxnvg,375
+numpy/f2py/tests/src/crackfortran/gh17859.f,sha256=7K5dtOXGuBDAENPNCt-tAGJqTfNKz5OsqVSk16_e7Es,340
+numpy/f2py/tests/src/crackfortran/gh22648.pyf,sha256=qZHPRNQljIeYNwbqPLxREnOrSdVV14f3fnaHqB1M7c0,241
+numpy/f2py/tests/src/crackfortran/gh23533.f,sha256=w3tr_KcY3s7oSWGDmjfMHv5h0RYVGUpyXquNdNFOJQg,126
+numpy/f2py/tests/src/crackfortran/gh23598.f90,sha256=41W6Ire-5wjJTTg6oAo7O1WZfd1Ug9vvNtNgHS5MhEU,101
+numpy/f2py/tests/src/crackfortran/gh23598Warn.f90,sha256=1v-hMCT_K7prhhamoM20nMU9zILam84Hr-imck_dYYk,205
+numpy/f2py/tests/src/crackfortran/gh23879.f90,sha256=LWDJTYR3t9h1IsrKC8dVXZlBfWX7clLeU006X6Ow8oI,332
+numpy/f2py/tests/src/crackfortran/gh27697.f90,sha256=bbnKpDsOuCWluoNodxzCspUQnu169zKTsn4fLTkhwpM,364
+numpy/f2py/tests/src/crackfortran/gh2848.f90,sha256=gPNasx98SIf7Z9ibk_DHiGKCvl7ERtsfoGXiFDT7FbM,282
+numpy/f2py/tests/src/crackfortran/operators.f90,sha256=-Fc-qjW1wBr3Dkvdd5dMTrt0hnjnV-1AYo-NFWcwFSo,1184
+numpy/f2py/tests/src/crackfortran/privatemod.f90,sha256=7bubZGMIn7iD31wDkjF1TlXCUM7naCIK69M9d0e3y-U,174
+numpy/f2py/tests/src/crackfortran/publicmod.f90,sha256=Pnwyf56Qd6W3FUH-ZMgnXEYkb7gn18ptNTdwmGan0Jo,167
+numpy/f2py/tests/src/crackfortran/pubprivmod.f90,sha256=eYpJwBYLKGOxVbKgEqfny1znib-b7uYhxcRXIf7uwXg,165
+numpy/f2py/tests/src/crackfortran/unicode_comment.f90,sha256=aINLh6GlfTwFewxvDoqnMqwuCNb4XAqi5Nj5vXguXYs,98
+numpy/f2py/tests/src/f2cmap/.f2py_f2cmap,sha256=iUOtfHd3OuT1Rz2-yiSgt4uPKGvCt5AzQ1iygJt_yjg,82
+numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90,sha256=iJCD8a8MUTmuPuedbcmxW54Nr4alYuLhksBe1sHS4K0,298
+numpy/f2py/tests/src/isocintrin/isoCtests.f90,sha256=jcw-fzrFh0w5U66uJYfeUW4gv94L5MnWQ_NpsV9y0oI,998
+numpy/f2py/tests/src/kind/foo.f90,sha256=zIHpw1KdkWbTzbXb73hPbCg4N2Htj3XL8DIwM7seXpo,347
+numpy/f2py/tests/src/mixed/foo.f,sha256=90zmbSHloY1XQYcPb8B5d9bv9mCZx8Z8AMTtgDwJDz8,85
+numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=pxKuPzxF3Kn5khyFq9ayCsQiolxB3SaNtcWaK5j6Rv4,179
+numpy/f2py/tests/src/mixed/foo_free.f90,sha256=fIQ71wrBc00JUAVUj_r3QF9SdeNniBiMw6Ly7CGgPWU,139
+numpy/f2py/tests/src/modules/gh25337/data.f90,sha256=9Uz8CHB9i3_mjC3cTOmkTgPAF5tWSwYacG3MUrU-SY0,180
+numpy/f2py/tests/src/modules/gh25337/use_data.f90,sha256=WATiDGAoCKnGgMzm_iMgmfVU0UKOQlk5Fm0iXCmPAkE,179
+numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90,sha256=c7VU4SbK3yWn-6wksP3tDx_Hxh5u_g8UnlDpjU_-tBg,402
+numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90,sha256=eEU7RgFPh-TnNXEuJFdtJmTF-wPnpbHLQhG4fEeJnag,403
+numpy/f2py/tests/src/modules/module_data_docstring.f90,sha256=tDZ3fUlazLL8ThJm3VwNGJ75QIlLcW70NnMFv-JA4W0,224
+numpy/f2py/tests/src/modules/use_modules.f90,sha256=UsFfx0B2gu_tS-H-BpLWed_yoMDl1kbydMIOz8fvXWA,398
+numpy/f2py/tests/src/negative_bounds/issue_20853.f90,sha256=fdOPhRi7ipygwYCXcda7p_dlrws5Hd2GlpF9EZ-qnck,157
+numpy/f2py/tests/src/parameter/constant_array.f90,sha256=KRg7Gmq_r3B7t3IEgRkP1FT8ve8AuUFWT0WcTlXoN5U,1468
+numpy/f2py/tests/src/parameter/constant_both.f90,sha256=-bBf2eqHb-uFxgo6Q7iAtVUUQzrGFqzhHDNaxwSICfQ,1939
+numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=re7pfzcuaquiOia53UT7qNNrTYu2euGKOF4IhoLmT6g,469
+numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=nEmMLitKoSAG7gBBEQLWumogN-KS3DBZOAZJWcSDnFw,612
+numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=IcxESVLKJUZ1k9uYKoSb8Hfm9-O_4rVnlkiUU2diy8Q,609
+numpy/f2py/tests/src/parameter/constant_real.f90,sha256=quNbDsM1Ts2rN4WtPO67S9Xi_8l2cXabWRO00CPQSSQ,610
+numpy/f2py/tests/src/quoted_character/foo.f,sha256=WjC9D9171fe2f7rkUAZUvik9bkIf9adByfRGzh6V0cM,482
+numpy/f2py/tests/src/regression/AB.inc,sha256=cSNxitwrjTKMiJzhY2AI5FaXJ5y9zDgA27x79jyoI6s,16
+numpy/f2py/tests/src/regression/assignOnlyModule.f90,sha256=c9RvUP1pQ201O_zOXgV0xp_aJF_8llxuA8Uot9z5tr0,608
+numpy/f2py/tests/src/regression/datonly.f90,sha256=9cVvl8zlAuGiqbSHMFzFn6aNWXj2v7sHJdd9A1Oc0qg,392
+numpy/f2py/tests/src/regression/f77comments.f,sha256=bqTsmO8WuSLVFsViIV7Nj7wQbJoZ7IAA3d2tpRDKsnA,626
+numpy/f2py/tests/src/regression/f77fixedform.f95,sha256=hcLZbdozMJ3V9pByVRp3RoeUvZgLMRLFctpZvxK2hTI,139
+numpy/f2py/tests/src/regression/f90continuation.f90,sha256=_W1fj0wXLqT91Q14qpBnM3F7rJKaiSR8upe0mR6_OIE,276
+numpy/f2py/tests/src/regression/incfile.f90,sha256=i7Y1zgMXR9bSxnjeYWSDGeCfsS5jiyn7BLb-wbwjz2U,92
+numpy/f2py/tests/src/regression/inout.f90,sha256=CpHpgMrf0bqA1W3Ozo3vInDz0RP904S7LkpdAH6ODck,277
+numpy/f2py/tests/src/regression/lower_f2py_fortran.f90,sha256=CMQL5RWf9LKnnUDiS-IYa9xc9DGanCYraNq0vGmunOE,100
+numpy/f2py/tests/src/regression/mod_derived_types.f90,sha256=565plqPwWDgnkpSb4-cfZbf3wTM85F2Gocklx5wpGWA,567
+numpy/f2py/tests/src/return_character/foo77.f,sha256=WzDNF3d_hUDSSZjtxd3DtE-bSx1ilOMEviGyYHbcFgM,980
+numpy/f2py/tests/src/return_character/foo90.f90,sha256=ULcETDEt7gXHRzmsMhPsGG4o3lGrcx-FEFaJsPGFKyA,1248
+numpy/f2py/tests/src/return_complex/foo77.f,sha256=8ECRJkfX82oFvGWKbIrCvKjf5QQQClx4sSEvsbkB6A8,973
+numpy/f2py/tests/src/return_complex/foo90.f90,sha256=c1BnrtWwL2dkrTr7wvlEqNDg59SeNMo3gyJuGdRwcDw,1238
+numpy/f2py/tests/src/return_integer/foo77.f,sha256=_8k1evlzBwvgZ047ofpdcbwKdF8Bm3eQ7VYl2Y8b5kA,1178
+numpy/f2py/tests/src/return_integer/foo90.f90,sha256=bzxbYtofivGRYH35Ang9ScnbNsVERN8-6ub5-eI-LGQ,1531
+numpy/f2py/tests/src/return_logical/foo77.f,sha256=FxiF_X0HkyXHzJM2rLyTubZJu4JB-ObLnVqfZwAQFl8,1188
+numpy/f2py/tests/src/return_logical/foo90.f90,sha256=9KmCe7yJYpi4ftkKOM3BCDnPOdBPTbUNrKxY3p37O14,1531
+numpy/f2py/tests/src/return_real/foo77.f,sha256=ZTrzb6oDrIDPlrVWP3Bmtkbz3ffHaaSQoXkfTGtCuFE,933
+numpy/f2py/tests/src/return_real/foo90.f90,sha256=gZuH5lj2lG6gqHlH766KQ3J4-Ero-G4WpOOo2MG3ohU,1194
+numpy/f2py/tests/src/routines/funcfortranname.f,sha256=oGPnHo0zL7kjFnuHw41mWUSXauoeRVPXnYXBb2qljio,123
+numpy/f2py/tests/src/routines/funcfortranname.pyf,sha256=coD8AdLyPK4_cGvQJgE2WJW_jH8EAulZCsMeb-Q1gOk,440
+numpy/f2py/tests/src/routines/subrout.f,sha256=RTexoH7RApv_mhu-RcVwyNiU-DXMTUP8LJAMSn2wQjk,90
+numpy/f2py/tests/src/routines/subrout.pyf,sha256=c9qv4XtIh4wA9avdkDJuXNwojK-VBPldrNhxlh446Ic,322
+numpy/f2py/tests/src/size/foo.f90,sha256=IlFAQazwBRr3zyT7v36-tV0-fXtB1d7WFp6S1JVMstg,815
+numpy/f2py/tests/src/string/char.f90,sha256=ihr_BH9lY7eXcQpHHDQhFoKcbu7VMOX5QP2Tlr7xlaM,618
+numpy/f2py/tests/src/string/fixed_string.f90,sha256=5n6IkuASFKgYICXY9foCVoqndfAY0AQZFEK8L8ARBGM,695
+numpy/f2py/tests/src/string/gh24008.f,sha256=UA8Pr-_yplfOFmc6m4v9ryFQ8W9OulaglulefkFWD68,217
+numpy/f2py/tests/src/string/gh24662.f90,sha256=-Tp9Kd1avvM7AIr8ZukFA9RVr-wusziAnE8AvG9QQI4,197
+numpy/f2py/tests/src/string/gh25286.f90,sha256=2EpxvC-0_dA58MBfGQcLyHzpZgKcMf_W9c73C_Mqnok,304
+numpy/f2py/tests/src/string/gh25286.pyf,sha256=GjgWKh1fHNdPGRiX5ek60i1XSeZsfFalydWqjISPVV8,381
+numpy/f2py/tests/src/string/gh25286_bc.pyf,sha256=6Y9zU66NfcGhTXlFOdFjCSMSwKXpq5ZfAe3FwpkAsm4,384
+numpy/f2py/tests/src/string/scalar_string.f90,sha256=ACxV2i6iPDk-a6L_Bs4jryVKYJMEGUTitEIYTjbJes4,176
+numpy/f2py/tests/src/string/string.f,sha256=shr3fLVZaa6SyUJFYIF1OZuhff8v5lCwsVNBU2B-3pk,248
+numpy/f2py/tests/src/value_attrspec/gh21665.f90,sha256=JC0FfVXsnB2lZHb-nGbySnxv_9VHAyD0mKaLDowczFU,190
+numpy/f2py/tests/test_abstract_interface.py,sha256=PXNQB0DZdmdZyysJkB8f9GY0_hA3hGkmha8aQBXc1Sk,811
+numpy/f2py/tests/test_array_from_pyobj.py,sha256=N1RJ0yFcLs6cFmdxSjizjfLRTEhdKRhrO9Vx8bcG0GU,23696
+numpy/f2py/tests/test_assumed_shape.py,sha256=8kPoQWn6IfMWNMba0al7a5XopKb3JnvZP3V3P6O2F8o,1467
+numpy/f2py/tests/test_block_docstring.py,sha256=P3K0QqnY0UfUQPc3vDrlP_WlZ6gNJ7iokG-D-ZG9tXQ,584
+numpy/f2py/tests/test_callback.py,sha256=P_5qM1xWOYfjeDgd70cIVpV1h0_tA1AP3kxRZDAeqII,7099
+numpy/f2py/tests/test_character.py,sha256=R6FhfIi85E6L1qwlJtsnTCvNgFRriE3kSXefTwIVgLk,21931
+numpy/f2py/tests/test_common.py,sha256=gr4MF659JBWvSY4eQAqgHnOrVbEpq0ZhGM5Cdbye1L4,644
+numpy/f2py/tests/test_crackfortran.py,sha256=x_E4KmEfBX5SFsNkO_-mUi4W_WuzB-ZFsLOfUdHjLVE,16413
+numpy/f2py/tests/test_data.py,sha256=tete-xcIZHZi5VFjy_pyTjr5AjhQzoyJvLsT9QLYU1M,2895
+numpy/f2py/tests/test_docs.py,sha256=wGsRmCJugExEAvj25pANoLr45S6fkpG4kf47dnfg9Ew,1855
+numpy/f2py/tests/test_f2cmap.py,sha256=zM8lksGAoH-cRvEVRkzciZ4oqH28obd-vvMVUObVjt0,387
+numpy/f2py/tests/test_f2py2e.py,sha256=aGZnZH5USd8FJpG5F1L6bWfUzuUqP954lit5-TDPbeE,27834
+numpy/f2py/tests/test_isoc.py,sha256=g5PLyJuAYwF0obaZ55j_e-CNOODJcADsYFSfxcCl5LM,1434
+numpy/f2py/tests/test_kind.py,sha256=ovQVxbtbbnb-Keo8Dh2LpDyPLbIA1uxiZOzMLo5KMX0,1825
+numpy/f2py/tests/test_mixed.py,sha256=DZcTCCle0o4aopFmGi58KtxzP6NFFci4N-pL3_HLb90,862
+numpy/f2py/tests/test_modules.py,sha256=GaOwxLf8KLdNkWIl9fveT9xg_wvCFdDsel9QiFweCAE,2301
+numpy/f2py/tests/test_parameter.py,sha256=P8hDezlxKN_Cm06oWGkS0pwlJvQz5QYwBsyTEA_Y1PQ,4634
+numpy/f2py/tests/test_pyf_src.py,sha256=xV81hRiGeytFFKeVnp-6O2OrGVdzJyecMEalCQSoDoI,1134
+numpy/f2py/tests/test_quoted_character.py,sha256=x19FhD6ZA7JkDuLuiXi48sGd8b42SPRuwwEY8CVRb24,477
+numpy/f2py/tests/test_regression.py,sha256=APQz3e38jz-AbGEBN5n-P1Wuegx4Da1ze7D7nLLpUL8,6197
+numpy/f2py/tests/test_return_character.py,sha256=t8cxO8LatnBXf2EU-HkfmdxvdHMYDk9DLx3kNUTArC4,1534
+numpy/f2py/tests/test_return_complex.py,sha256=_uWrnSh-IDL8men8X__5srP4wM0RkICr4WVJgoNgrzY,2440
+numpy/f2py/tests/test_return_integer.py,sha256=ng_cpFX4nStcpSFoYdD9GiUdCJSXPU0On2MLOA4uOpQ,1813
+numpy/f2py/tests/test_return_logical.py,sha256=OrS11uAw_asDamL7inRKf-S-7SBG0GTS8Vrqlexrkm0,2048
+numpy/f2py/tests/test_return_real.py,sha256=ynInWwkcRfUe981kGJnrkkZeKK7QFlvkiODoIJj6Jg0,3273
+numpy/f2py/tests/test_routines.py,sha256=f9pR8FNJgKuBWtzCjlfniWVHJecpW6gSNkGDb2t693c,795
+numpy/f2py/tests/test_semicolon_split.py,sha256=akc4xJiHI6xOCfpCEtFYPMz8qy2K5jODEPyJHYQvLdE,1627
+numpy/f2py/tests/test_size.py,sha256=SjES727lNcCJFePDnh7uBhncOXWOcqHqVPbZPvBO5js,1155
+numpy/f2py/tests/test_string.py,sha256=47wYPuO1NkjhXSbbyS8vBKsNCju5dA9uMjNhGPx-BGg,2938
+numpy/f2py/tests/test_symbolic.py,sha256=dmuYLhhcv-rT-ux_aVrWaJj_Yxmznezl6Enu8-ediK0,18342
+numpy/f2py/tests/test_value_attrspec.py,sha256=4wY9qPXl0JoPGCG7GyyuMDKLfsHAV8KRWGdEk9-ZZT8,330
+numpy/f2py/tests/util.py,sha256=KIDsCW5uZXe6jSdWpY9Ozlqs5-v-eeDsW3P5TDWKDzo,12112
+numpy/f2py/use_rules.py,sha256=emZhSLPbNDyBHnsfKKXDnGz4P_gwrgL0dfCZcD3n9D4,3376
+numpy/f2py/use_rules.pyi,sha256=gIAAemWfcidclVYZUpa6RRmSdUEDw4FDnGPaCNo93Zw,424
+numpy/fft/__init__.py,sha256=OWE0m6H_blyv1wtqQpiXU5kqxF6O2UxxcV5t11U05RE,8291
+numpy/fft/__init__.pyi,sha256=6XgAsd9coqJ3jBOPD3vn1-8AcbMLhjxzQd21xjeqmlA,514
+numpy/fft/__pycache__/__init__.cpython-312.pyc,,
+numpy/fft/__pycache__/_helper.cpython-312.pyc,,
+numpy/fft/__pycache__/_pocketfft.cpython-312.pyc,,
+numpy/fft/__pycache__/helper.cpython-312.pyc,,
+numpy/fft/_helper.py,sha256=hIn2ZyEYG4fLB3MGvCPvpSrLXFfh-xO4zGKljk_TQjY,6787
+numpy/fft/_helper.pyi,sha256=1A1kitc5k62ER6X1XLF7PIQL5FiVxxRKu_iCqiQ1kIU,1394
+numpy/fft/_pocketfft.py,sha256=CfpApR9R0SOucql9gp9vXadm_y5cBM-Xnj5trDpvFSE,62598
+numpy/fft/_pocketfft.pyi,sha256=_RIRwdhtixjN4qszZk-xeYn2jmcW_NNAMEJHeETigv0,3174
+numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so,sha256=6EnciecXsqMVlI3QEneGCYNyx6IgbP0TD6gYtz5pFk0,539072
+numpy/fft/helper.py,sha256=RoEADsOnoCgSTL1gE5n-36llz8iwxGzn52af3L-9KEY,611
+numpy/fft/helper.pyi,sha256=KsF45bVyZ4_eJbBFpkER9L8MCWmg7dJuhLqY_7uFNZs,891
+numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/fft/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc,,
+numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc,,
+numpy/fft/tests/test_helper.py,sha256=LeVDCCdHzFhmCQ5ByMtVyA22GphgTQS5dupuxrLE8X0,6154
+numpy/fft/tests/test_pocketfft.py,sha256=PCF833rSWsXOMWN8wCluhq0aYHU24_tHbuMl1PuO6dE,24446
+numpy/lib/__init__.py,sha256=zYGuqEfPqq7LDbidpxYs8GgCNAmoJ4xQgFvF3XKJ5Rg,3004
+numpy/lib/__init__.pyi,sha256=Z7OsQAZGURd4cI3xnEF37unbOUqtknwEkT8yQTF-AF8,1651
+numpy/lib/__pycache__/__init__.cpython-312.pyc,,
+numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_datasource.cpython-312.pyc,,
+numpy/lib/__pycache__/_format_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_iotools.cpython-312.pyc,,
+numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_utils_impl.cpython-312.pyc,,
+numpy/lib/__pycache__/_version.cpython-312.pyc,,
+numpy/lib/__pycache__/array_utils.cpython-312.pyc,,
+numpy/lib/__pycache__/format.cpython-312.pyc,,
+numpy/lib/__pycache__/introspect.cpython-312.pyc,,
+numpy/lib/__pycache__/mixins.cpython-312.pyc,,
+numpy/lib/__pycache__/npyio.cpython-312.pyc,,
+numpy/lib/__pycache__/recfunctions.cpython-312.pyc,,
+numpy/lib/__pycache__/scimath.cpython-312.pyc,,
+numpy/lib/__pycache__/stride_tricks.cpython-312.pyc,,
+numpy/lib/__pycache__/user_array.cpython-312.pyc,,
+numpy/lib/_array_utils_impl.py,sha256=GYWiyNqLQ7DGUSBXz0bbR6AAqZStDIwUe7tsbZ__15M,1697
+numpy/lib/_array_utils_impl.pyi,sha256=AktSeZcFe_XUQ6utYHQyJKG8l8bhM8tQL2Kttj1DjcQ,820
+numpy/lib/_arraypad_impl.py,sha256=z5--XT80TcnDZezHVrdxauJSY3yC4vMDdd7JlO-h3zw,32296
+numpy/lib/_arraypad_impl.pyi,sha256=W98XPsguuf8B924KVVxs6l_EOBM9JKzwTmHL98CKbs0,1837
+numpy/lib/_arraysetops_impl.py,sha256=VFdgpFZJcyJhYFPcTk_LQD_SrqX6poy_shsLKvZigy0,41275
+numpy/lib/_arraysetops_impl.pyi,sha256=Yh-w9l43w6vMBLfwzIKQlxHcE6gFqOsfu5gyKpMgc_s,13403
+numpy/lib/_arrayterator_impl.py,sha256=HtOADIHuG9ADbbMTgmh4P_muke1V-8E-FNEO3bVOGPA,7218
+numpy/lib/_arrayterator_impl.pyi,sha256=8u0nb5NPpWNib-FlWaXlp6BXBPgTv5__NF30FD_1qmM,1876
+numpy/lib/_datasource.py,sha256=zk-Vbn4JlDHEVa3De6A3NgjnnizSJi-HF0ZvvA6YIo4,22731
+numpy/lib/_datasource.pyi,sha256=135RvD3p-3mHdNp_sZV4aN9brwEFvEM49VE1eHlFEfs,996
+numpy/lib/_format_impl.py,sha256=zcQ3xXxPf7epktsYrcdBbIPuOCh9OPV1g3gB6ghf4rE,36865
+numpy/lib/_format_impl.pyi,sha256=_0lEht2hKbTevv0eGChmYMBTAg-2jAfvrfU9p326VHs,869
+numpy/lib/_function_base_impl.py,sha256=AZGyN29Ecw4LRuU1TNUcPC7cVHO9ye4bJ9FQI7n_Gwc,196425
+numpy/lib/_function_base_impl.pyi,sha256=HY21gmJcUIvTsYL2UUZ8l2MYj34cp_7Mdsmck-FjeEE,24116
+numpy/lib/_histograms_impl.py,sha256=Utu7aAQc7ZpsHn_04ogUnZq1ZcdHfipcq9eRq817oVU,38432
+numpy/lib/_histograms_impl.pyi,sha256=QouOxW0sa_LMJ2hDv5WEO9k95mTMjEvbP2-7swNJxzI,1093
+numpy/lib/_index_tricks_impl.py,sha256=g7Np4E8AG9sgyi9HTUgvOM08pIlAj_cvXw4cc7NrU5I,32186
+numpy/lib/_index_tricks_impl.pyi,sha256=gQwY1mj_Sxk2eo9BXcqJ68F88XvzQB81o0nNUkQ9w9o,6325
+numpy/lib/_iotools.py,sha256=0jtpvpl5L-_1ODI21F-1i19t1e3L-6wJxRd1CSLewL0,30876
+numpy/lib/_iotools.pyi,sha256=69hfBI89W2UP6ozHiSByt-GxTupni-gBRPihFbXSh6Q,3393
+numpy/lib/_nanfunctions_impl.py,sha256=cdOT7dYwjvUpI9iEHTrwzbbtKhP9ZZgOCMirTBeYPUk,71949
+numpy/lib/_nanfunctions_impl.pyi,sha256=j5dyJz_c-SQDxXrL9N2ouKC-DsP_EVDZyLedGXqCpMI,833
+numpy/lib/_npyio_impl.py,sha256=kucazwCufh4mNwECyZxEerxsqa_GxQMz1kYuZURDI8s,99277
+numpy/lib/_npyio_impl.pyi,sha256=WWlGxbobwLgEiD-k58g_Q9K1HW1vDk--AYrBSjjqALE,9388
+numpy/lib/_polynomial_impl.py,sha256=TWiqlG3WDa97tayxQCEltZD9TNhUyFprzL_Umd7Lxso,44134
+numpy/lib/_polynomial_impl.pyi,sha256=9PvnPmeCk45ldiJ8xHwsIVdX9DrjPhY9H7CEFbVJMLQ,6999
+numpy/lib/_scimath_impl.py,sha256=QAU4uM_INzVqCTs-ATEyy1JhREl_wDJn_ygU75YtfgE,15692
+numpy/lib/_scimath_impl.pyi,sha256=pXBZjHPB_FbeBfe9M3N8TjrET_oclGuafWjTHC-xjUs,2774
+numpy/lib/_shape_base_impl.py,sha256=5vkU9rPOwKvSc7TzxdfWtM08uV0m15iHPTxbqcY47Oc,39479
+numpy/lib/_shape_base_impl.pyi,sha256=36gmgbFd1cUmSUfUihFtb1brc2gKLYi8NXDAEzLyBmQ,5412
+numpy/lib/_stride_tricks_impl.py,sha256=y3Uxp3jFzDwmIQ137N2zap7-vW_jONUQmXnbfqrs60A,18025
+numpy/lib/_stride_tricks_impl.pyi,sha256=6rR7IO04w1FPCKUM920r9Kf_A_hpZbIABo6Rcl34tFI,1815
+numpy/lib/_twodim_base_impl.py,sha256=3nOLvCD6cfM6MD3o381F48GB8poqsUGDCDOQlOBQXmY,33925
+numpy/lib/_twodim_base_impl.pyi,sha256=nBRqOTSD21ioBkUw6vtzy1-ZyczJcvybkvG3-hvSIkY,11193
+numpy/lib/_type_check_impl.py,sha256=WeVfWz_0Klvb2K_6l0x4nHwHBwPYgfcxeZinV_dp_mw,19221
+numpy/lib/_type_check_impl.pyi,sha256=xpZV5LStVGHbEDAcJUbD7iZFE0onwCPZZuwb01P4o_Q,9713
+numpy/lib/_ufunclike_impl.py,sha256=0eemf_EYlLmSa4inNr3iuJ1eoTMqLyIR0n6dQymga3Y,6309
+numpy/lib/_ufunclike_impl.pyi,sha256=SJ7wbjWFI6WL_rp3CNqbZoKoza4Ou4uDwXvpt4iekys,1288
+numpy/lib/_user_array_impl.py,sha256=t3nnrFuvbBizFV1K3C9NNyIM80LU5spA88MlrYJzEok,7697
+numpy/lib/_user_array_impl.pyi,sha256=AZpI9fHHYpLxyYL9ud5YDHcZhxLl-YpfB23i9f154BQ,9110
+numpy/lib/_utils_impl.py,sha256=7BSreRcHNIsUeMj3U1GbqzVjJYKvyuEWHdG_C4TM46Q,23346
+numpy/lib/_utils_impl.pyi,sha256=ckxdUjdGEaa3JAKVQZHYgZ1R3glZZg-ssh90vkV7dJg,371
+numpy/lib/_version.py,sha256=4dUrc9Js0KPEQ5adoYKR5dnP4ffjCDtJUKPqcMauwY4,4851
+numpy/lib/_version.pyi,sha256=vysY5Vl_nh4si6GkMXEoB6pUDl-jJ5g0LpSDa40F124,641
+numpy/lib/array_utils.py,sha256=XbcyhJ9S0IlNnP9Ny6yygLMEACWWUPNOU8vevj1TEpI,144
+numpy/lib/array_utils.pyi,sha256=LfY_fzfTdtjoLIi5FSCDsC5weYrmAHFh7fxFfniupbg,296
+numpy/lib/format.py,sha256=npJ0eJhT7uKNK5a0lCMGfiJv-R4jyNhiIPeZbJcNXBs,477
+numpy/lib/format.pyi,sha256=fh-5SN4MORvjLliV8LwOb3VqG8tFvOaMeG4Vn5CBusA,1482
+numpy/lib/introspect.py,sha256=u-wgfMuYt8GI3AnRNdXs4j4w9eNTsazlqrazS-P7gKA,2749
+numpy/lib/introspect.pyi,sha256=AWVX6b9mzdwsxizOY0LydWKBEpGatHaeeXGc2txYJEM,152
+numpy/lib/mixins.py,sha256=Kff76ScpgWV3cruicI9A7a4zfBnGVmXtwQzMzu5xDEo,7200
+numpy/lib/mixins.pyi,sha256=I3iXqrcHpV4jwsgBGJKT2Ero2SlTSEZZDmfcx3DJ7Cc,3131
+numpy/lib/npyio.py,sha256=eaPvfHGSzUE70TJHHLOCPIX9G5ihMuBEexy6_PNhJ9Q,68
+numpy/lib/npyio.pyi,sha256=qX68dlgy7M2MtAgNSabTV8rWOTXOXCE1_72XcdJq10Y,192
+numpy/lib/recfunctions.py,sha256=T4aa5xXav9ntfw5YmzPiq_YUkh12wGk40XyBLQPCEzU,59539
+numpy/lib/recfunctions.pyi,sha256=NTf4FyM2Kinx56nNHcyGjKUz_RBSJQr-qtZsLKeIYvQ,13216
+numpy/lib/scimath.py,sha256=qjFaQeq0zEIl7gKqOhaj_vmCC_KaFdyTmHdLUUkSp5I,169
+numpy/lib/scimath.pyi,sha256=Fe7sfleFSY0uCGUj5gATxjEoMnva1nJ53YyP1wP11Nk,512
+numpy/lib/stride_tricks.py,sha256=x0_BfwlycBAlR3BvpxTndeP96dHBT_fASbkTTTzBYgI,88
+numpy/lib/stride_tricks.pyi,sha256=FLo0b8NlLPsS58VzjFFchivpBOjjE_meU0EhWEFPQNY,170
+numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/lib/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test__version.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_format.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_io.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc,,
+numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc,,
+numpy/lib/tests/data/py2-np0-objarr.npy,sha256=ZLoI7K3iQpXDkuoDF1Ymyc6Jbw4JngbQKC9grauVRsk,258
+numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258
+numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366
+numpy/lib/tests/data/py3-objarr.npy,sha256=7mtikKlHXp4unZhM8eBot8Cknlx1BofJdd73Np2PW8o,325
+numpy/lib/tests/data/py3-objarr.npz,sha256=vVRl9_NZ7_q-hjduUr8YWnzRy8ESNlmvMPlaSSC69fk,453
+numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96
+numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96
+numpy/lib/tests/test__datasource.py,sha256=0vL8l30yb53Wwnt0YbdqvOl2xQf9fc0S-0pRTMAdaYc,10581
+numpy/lib/tests/test__iotools.py,sha256=LTODsFclDQnIbKQb98hEysgVhQ6cs230aj45pA1QYFc,13765
+numpy/lib/tests/test__version.py,sha256=SwXoEqMap603c2jd7ONod0ZOVQeX6T-zArMf03OCHbw,1999
+numpy/lib/tests/test_array_utils.py,sha256=hPXtCjoBKe6MP91sg_04EBpRYg7MITVlCAgD1AScjx8,1118
+numpy/lib/tests/test_arraypad.py,sha256=GzqMIQ0Y8XLYmP5osXzl5W1Pcywy_OK-39STKoCWJc4,56155
+numpy/lib/tests/test_arraysetops.py,sha256=GKotFUbKgEfHybghYP1zIM0RWMqW1pa4cdYlML1seXQ,40445
+numpy/lib/tests/test_arrayterator.py,sha256=1LZmgQQJpndfwh3X2mL4JpaWvKQl9a0WAnQdSpXimhM,1301
+numpy/lib/tests/test_format.py,sha256=BTKd2lUodd8gNznWkh_Hl3mG8Mu8SOFADEqGd5kCw64,41956
+numpy/lib/tests/test_function_base.py,sha256=z2SkeGd9qQjXmaxk6bhoi06qlfxdrDzJEqRsDxIuEoM,171119
+numpy/lib/tests/test_histograms.py,sha256=QkcA46lJ1Y-T3f4-Qn7kn6J9bIid3RLK7NKMrUI3Rpw,33966
+numpy/lib/tests/test_index_tricks.py,sha256=bp4GFjqQ3s_taGDVsCOgs5YU7qtDMhQuPGwvcCxj2sk,20477
+numpy/lib/tests/test_io.py,sha256=8StHTe3-XsyPNBy4IveftRY1Zba2JTW3ALOHg_bEfRw,110989
+numpy/lib/tests/test_loadtxt.py,sha256=1R_xoumDPtPGQYoWh_WWCFKeb3-9WfLIoMHCYQQ0CtQ,40557
+numpy/lib/tests/test_mixins.py,sha256=9r6tgP4Wb6vCDn590PkHmHl-GBAoAL6_-mwp2wbiaO0,7009
+numpy/lib/tests/test_nanfunctions.py,sha256=1GGtPUD8bS5v2FxLr8e0BUgx9k6Iu-8WLZisawPY4Yw,54098
+numpy/lib/tests/test_packbits.py,sha256=REkoSXh9FVVTizyyHWkLqXFLIjt0rynXeixhK8-gBgk,17543
+numpy/lib/tests/test_polynomial.py,sha256=3Z7x5gf2cSb5pN5e0Sb_hZetF3mI5GrTLv-OaN7v0m0,12312
+numpy/lib/tests/test_recfunctions.py,sha256=xYsC_t_tpIpWJvS1pRU2HNxZTO1cJ3QZ1OnXt4ajm0s,43928
+numpy/lib/tests/test_regression.py,sha256=UURtmtwfrxMDF3UY1ZMNbgIJOa38jUzYKCmpYYD8e3Q,7716
+numpy/lib/tests/test_shape_base.py,sha256=ZWHeWCs9x0sD-L03h6kTmUdRHvxHVC-8KOu8KomhyKQ,27406
+numpy/lib/tests/test_stride_tricks.py,sha256=tBErppWSp8jAckkx_zN5ZbAhfKxZJ99cOQxDI9B_xh0,23030
+numpy/lib/tests/test_twodim_base.py,sha256=-djv2iP3W2sB4rAgj9Orl8alGwDFfPvcVu6CNvlKIcg,18925
+numpy/lib/tests/test_type_check.py,sha256=2M6uyLSI-CP13CAylnBn3kbT6nrK6wYWW-Scw13vsAQ,14796
+numpy/lib/tests/test_ufunclike.py,sha256=5a65WfziLpjPJ_yE8zg-A-q08xlyiU8_S1JH8kb-Uyw,3015
+numpy/lib/tests/test_utils.py,sha256=HRZxH8Rs-PxCpMAhgbNOrTfBrsA8B2eTOKypY0Udczw,2374
+numpy/lib/user_array.py,sha256=zs6u6TAXoAySGAZc1qE6fKD4AN-t6urZCaiZaKmHiso,63
+numpy/lib/user_array.pyi,sha256=8C-aTekEYA0bVU7F3turaw1w0j8FfFvDp9xKa9Pfe94,53
+numpy/linalg/__init__.py,sha256=7pVvFwOJFKOArGeUs6MNj3MNqqsx7xx0vt2_7avNAg4,2124
+numpy/linalg/__init__.pyi,sha256=C3fZHKPSa4wpfRqfTjw3DpzE5p-Czjus48OuMLsDckQ,1060
+numpy/linalg/__pycache__/__init__.cpython-312.pyc,,
+numpy/linalg/__pycache__/_linalg.cpython-312.pyc,,
+numpy/linalg/__pycache__/linalg.cpython-312.pyc,,
+numpy/linalg/_linalg.py,sha256=6rC77pyHWNOHk03DKEnwHezrUCYdAuItQfA61v8lYsw,115106
+numpy/linalg/_linalg.pyi,sha256=inijXDOFEzZayOL37HNKOqyH8wCLQaU0r__pO4do7Ag,11141
+numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so,sha256=46UEl6W4XZFSu4ZGDC_RgVSBzAMg4Wi45u-FXiZARdk,231833
+numpy/linalg/_umath_linalg.pyi,sha256=awvRP1FGuomyfeaR0wzHvrXURAI8tUF3u2RRZ24hkXw,1409
+numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so,sha256=yKClyTWZEb-1IENKoh6zVgluRTPO5foLIRXC2LMJd9c,30001
+numpy/linalg/lapack_lite.pyi,sha256=QjaS8R4uu6MiJDcCFNE5EOAYGnFCcrNz873gs2OUXEM,2672
+numpy/linalg/linalg.py,sha256=6NimP68tYa0qBRglWH87_tOh2scshtDpcwfvBvmd6Po,585
+numpy/linalg/linalg.pyi,sha256=8E5sbKeM5Ors7r143mM7A4ui8kFZM0SF7NfUGW1eN-4,932
+numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc,,
+numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc,,
+numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc,,
+numpy/linalg/tests/test_deprecations.py,sha256=9p_SRmtxj2zc1doY9Ie3dyy5JzWy-tCQWFoajcAJUmM,640
+numpy/linalg/tests/test_linalg.py,sha256=VEvQHtAe0o3iVPogcCv9Frx-0HOyNH7WsQHcbkVfgaQ,84998
+numpy/linalg/tests/test_regression.py,sha256=9a96oyeEGQMUxfw_-GUjNWqn51iu4Cf7kllJ0bKp9ws,6704
+numpy/ma/API_CHANGES.txt,sha256=F_4jW8X5cYBbzpcwteymkonTmvzgKKY2kGrHF1AtnrI,3405
+numpy/ma/LICENSE,sha256=BfO4g1GYjs-tEKvpLAxQ5YdcZFLVAJoAhMwpFVH_zKY,1593
+numpy/ma/README.rst,sha256=krf2cvVK_zNQf1d3yVYwg0uDHzTiR4vHbr91zwaAyoI,9874
+numpy/ma/__init__.py,sha256=XpDWYXwauDc49-INsk455D03Uw4p6xFdsdWOn2rt87U,1406
+numpy/ma/__init__.pyi,sha256=QV7F1eN7GQLA2V2vI_bYXC_XhoZl-2IqXHWIqJtXLKU,6946
+numpy/ma/__pycache__/__init__.cpython-312.pyc,,
+numpy/ma/__pycache__/core.cpython-312.pyc,,
+numpy/ma/__pycache__/extras.cpython-312.pyc,,
+numpy/ma/__pycache__/mrecords.cpython-312.pyc,,
+numpy/ma/__pycache__/testutils.cpython-312.pyc,,
+numpy/ma/core.py,sha256=Te0RIWw8JyG2iJJjeSiG_t1ahKAICDdr7_pl4G6Q1Yc,288881
+numpy/ma/core.pyi,sha256=RxL-vzdzpBB97UqNesAkHjvFxQUom1ARUvKurQgz58I,40459
+numpy/ma/extras.py,sha256=f8qf6t_x9k34OKmHiNIft9PFCyLYMeBSGhiYjhUuIpc,70680
+numpy/ma/extras.pyi,sha256=4nJDP_0yoEtchWJgczd0ubXba76TsGPBOuCRexQgVbE,3794
+numpy/ma/mrecords.py,sha256=00gzzy_xxC408pVZIRUSRhbwqc1UHcyhE-tO2FYM8IE,27073
+numpy/ma/mrecords.pyi,sha256=YW81zL9LDzi-L-2WI7135-HxBzj12n4YgARHh2qZ6Bs,1973
+numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/ma/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/ma/tests/__pycache__/test_arrayobject.cpython-312.pyc,,
+numpy/ma/tests/__pycache__/test_core.cpython-312.pyc,,
+numpy/ma/tests/__pycache__/test_deprecations.cpython-312.pyc,,
+numpy/ma/tests/__pycache__/test_extras.cpython-312.pyc,,
+numpy/ma/tests/__pycache__/test_mrecords.cpython-312.pyc,,
+numpy/ma/tests/__pycache__/test_old_ma.cpython-312.pyc,,
+numpy/ma/tests/__pycache__/test_regression.cpython-312.pyc,,
+numpy/ma/tests/__pycache__/test_subclassing.cpython-312.pyc,,
+numpy/ma/tests/test_arrayobject.py,sha256=MSvEcxlsVt4YZ7mVXU8q_hkwM0I7xsxWejEqnUQx6hE,1099
+numpy/ma/tests/test_core.py,sha256=novMpyqUqf9O7970aVB2HUqTBSiUqMQINMas3PbTgjM,219717
+numpy/ma/tests/test_deprecations.py,sha256=Hye4FMqAdPOOCVnihbs4R8ntLvYJy6WF3LA29876urI,2569
+numpy/ma/tests/test_extras.py,sha256=BnFaTx33kNdLDuLJ74Dt1f7gGsD_noYFmBGA8UelUqI,78435
+numpy/ma/tests/test_mrecords.py,sha256=ZDEv-LbPlx4Qf9NQs8unNXgrdXupRv4IQljf4_vCr34,19894
+numpy/ma/tests/test_old_ma.py,sha256=PMA26SyXJxN0o-pPvyEhl_YF2zRcxuPRMPAXztKCphA,33018
+numpy/ma/tests/test_regression.py,sha256=_eskYMrmSHe-_iODK6mvRD5gN_w6NpAl5agsyIGRRUo,3303
+numpy/ma/tests/test_subclassing.py,sha256=_TQZ4WM2VG-yuITIXeRZbAZrWDHpxtQoLzDKbGRmuHM,16936
+numpy/ma/testutils.py,sha256=vNG1ay689zOktrm-33tyz0bsCLxkJHK6j--2JtHRPq4,10235
+numpy/matlib.py,sha256=_S9N8S2NNsHGQUcloxrxABtJDejHiUyMdMJO7SayPkA,10638
+numpy/matlib.pyi,sha256=d9Tw-ThrWNUgXKGTiQvCjqrkWQSWqHcXUXAxvYENtYk,9602
+numpy/matrixlib/__init__.py,sha256=Ut6IqfjuA-kwwo6HBOYAgFjXXC_h7YV_3HyDsKM72dk,243
+numpy/matrixlib/__init__.pyi,sha256=e9xC6kWhIYoPqa3-tmtxdaq8RLjXrBjpyXLqV-pV9UY,106
+numpy/matrixlib/__pycache__/__init__.cpython-312.pyc,,
+numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc,,
+numpy/matrixlib/defmatrix.py,sha256=wpw6lZU9X6qp8wAJokDXt2RBrL1eXqlmBt-ojIwYzlU,30875
+numpy/matrixlib/defmatrix.pyi,sha256=ReQicwbCq4EFGM6paj5KoTeFK3fyiBMC4fJLJcP0SI4,478
+numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/matrixlib/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-312.pyc,,
+numpy/matrixlib/tests/__pycache__/test_interaction.cpython-312.pyc,,
+numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-312.pyc,,
+numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-312.pyc,,
+numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-312.pyc,,
+numpy/matrixlib/tests/__pycache__/test_numeric.cpython-312.pyc,,
+numpy/matrixlib/tests/__pycache__/test_regression.cpython-312.pyc,,
+numpy/matrixlib/tests/test_defmatrix.py,sha256=G9v4-cGuAbHVVDCJ2rCUnQrSTUChOih_6ZMV-ZlYsNA,14977
+numpy/matrixlib/tests/test_interaction.py,sha256=BMpaAIeGOJ5EEHWuozBifN8l3Av5RO6jGoaPgdzTiqQ,11874
+numpy/matrixlib/tests/test_masked_matrix.py,sha256=UN212xE5e3G9OuwdOWvRMFT5-z3zIfjQQIIpY26a52k,8787
+numpy/matrixlib/tests/test_matrix_linalg.py,sha256=33UxWKz2NwI2Wt3pP0AyaooZ5tCFpbOePWek3XT0a4U,2149
+numpy/matrixlib/tests/test_multiarray.py,sha256=S5kjzsQR2YgT0qIGrNO1lUDl3o-h0EIdg_g3U3CnuRc,555
+numpy/matrixlib/tests/test_numeric.py,sha256=hZ-r921WDG8Ck8KmT6ulgykjHU1QaGY6gprC2OPo-vg,447
+numpy/matrixlib/tests/test_regression.py,sha256=XnfZ4RoTS49XMUyUlHVMc6wcWImNRja7DT1wTdEk428,934
+numpy/polynomial/__init__.py,sha256=gGSwLNpPCpXfPgiJSsgVoVsJ0AS1c-_MWlGOeiG55sI,6726
+numpy/polynomial/__init__.pyi,sha256=tVWqA3_ZzcTyfp5yIr4ca87Tgx4YtY4660UQi3JhfJI,688
+numpy/polynomial/__pycache__/__init__.cpython-312.pyc,,
+numpy/polynomial/__pycache__/_polybase.cpython-312.pyc,,
+numpy/polynomial/__pycache__/chebyshev.cpython-312.pyc,,
+numpy/polynomial/__pycache__/hermite.cpython-312.pyc,,
+numpy/polynomial/__pycache__/hermite_e.cpython-312.pyc,,
+numpy/polynomial/__pycache__/laguerre.cpython-312.pyc,,
+numpy/polynomial/__pycache__/legendre.cpython-312.pyc,,
+numpy/polynomial/__pycache__/polynomial.cpython-312.pyc,,
+numpy/polynomial/__pycache__/polyutils.cpython-312.pyc,,
+numpy/polynomial/_polybase.py,sha256=b0kCiTgUm8D5QC_LWSm6yNvwC79npDAeksK0vQPciCQ,39358
+numpy/polynomial/_polybase.pyi,sha256=mKbxu6z3iC6NnDNXHPrMm6Vo6RQvrvtCel7S5Mi3Q3Q,8187
+numpy/polynomial/_polytypes.pyi,sha256=e-uO5HmbYsWffZtOKCDgrxEqvUm-YKTqQKXj83m8j6s,22382
+numpy/polynomial/chebyshev.py,sha256=T0vrDsOrO8Ntxbzf_-0dv_lPyF5c45OjDoVJDzeGBAI,62322
+numpy/polynomial/chebyshev.pyi,sha256=jn21NMBsc4FYvC_5BM4kOfnYEaUSINdq3RyooS-5rjU,4787
+numpy/polynomial/hermite.py,sha256=IguwJittKDh3y0rF1M9lLuIptFXgq-PhaHNTjfE3CnA,54603
+numpy/polynomial/hermite.pyi,sha256=bNrlxTVHTskFUOKDbyrISXbOsmPxxhnAGmZmOF1mLpc,2463
+numpy/polynomial/hermite_e.py,sha256=fhuui2jLc0I5WEEsRDcyw8FKSFxOl9jr8b4yRIxEZqQ,52305
+numpy/polynomial/hermite_e.pyi,sha256=OyjRyzP7tz5sDP-90D4dpn82zJ4zPUCIzhpXaOCpkCY,2555
+numpy/polynomial/laguerre.py,sha256=XJ5dNqWuZNhqwARb_QW4nfrRHyJv1JMCgsP2W4-KE9M,52474
+numpy/polynomial/laguerre.pyi,sha256=_72JssagROc-vwt8W1i3aOo8s5l2v2G2NzMUm14vZnw,2191
+numpy/polynomial/legendre.py,sha256=sMJTmGdewNhccrK9CyfNIIFRgzmY-AJHhgo6zxtGYvo,51129
+numpy/polynomial/legendre.pyi,sha256=dPizRI4HLqAQ8Jms8Ta_HtsUyHV49fk3hFCZNOid1fo,2191
+numpy/polynomial/polynomial.py,sha256=-IICosb2j8ClsIfXPDWgXqLx6WuhU6olocU4JkxN7kI,52196
+numpy/polynomial/polynomial.pyi,sha256=A3oK3wKteiRkUcNEkzgvZQ11HIqloIRoxG2X9rPVZBE,2021
+numpy/polynomial/polyutils.py,sha256=mQEa3oCz9X-d1HaNdXkpBJzXWGzgY42WDMjJOn988O8,22657
+numpy/polynomial/polyutils.pyi,sha256=gnB7TQZclbMGneVVFE1z5LX5Qgs3GCidRTWL97rja-4,10235
+numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/polynomial/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_classes.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_hermite.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_laguerre.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_legendre.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_polynomial.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_polyutils.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_printing.cpython-312.pyc,,
+numpy/polynomial/tests/__pycache__/test_symbol.cpython-312.pyc,,
+numpy/polynomial/tests/test_chebyshev.py,sha256=gcK5jVv1vG3O-VVMkZKpmweR6_4HQAXtzvbJ_ib0-B8,20650
+numpy/polynomial/tests/test_classes.py,sha256=nBsVHcubheo1s7t-jUXY984ptC2x-aWDPkWED1cUZt4,18552
+numpy/polynomial/tests/test_hermite.py,sha256=sexvJUDmac1JKL8qOeQr70KJTD1KdoJ10LKosFfBqm0,18687
+numpy/polynomial/tests/test_hermite_e.py,sha256=r3QQOUVoBBVgZzCjE3qzIl-wMcl_kI1Nuc-KGNy7rIw,19026
+numpy/polynomial/tests/test_laguerre.py,sha256=8c2h7Lj3F2DtuVuOPlS8ZL-dq_IoFxPrzREbuI5iZqQ,17637
+numpy/polynomial/tests/test_legendre.py,sha256=hMdOs_RzkGihUzg7gDmeM1FxkIT2UIgqkDWanucfMHg,18805
+numpy/polynomial/tests/test_polynomial.py,sha256=Pi_X6ThfxgVbgzyAnu3FcyTIUvpL9ENxRSanyUjgon8,22911
+numpy/polynomial/tests/test_polyutils.py,sha256=gO7B1oPBRRClF7WeXFsLjGwqUl9kjVIv7aAoHlhqVsk,3780
+numpy/polynomial/tests/test_printing.py,sha256=qk76AKCvHHqbsDnHIVf5fxIEH9Va4U9jwJkJ1b67k1o,21403
+numpy/polynomial/tests/test_symbol.py,sha256=ShBdNg9cvYy31fQnrn4gprZUSD0shz5r8zlG8CEq7gs,5375
+numpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/random/LICENSE.md,sha256=EDFmtiuARDr7nrNIjgUuoGvgz_VmuQjxmeVh_eSa8Z8,3511
+numpy/random/__init__.pxd,sha256=9JbnX540aJNSothGs-7e23ozhilG6U8tINOUEp08M_k,431
+numpy/random/__init__.py,sha256=WFzntztUVNaiXCpQln8twyL8HSFNS7XAWJlJsQXgbqk,7480
+numpy/random/__init__.pyi,sha256=5X5UqSDkeruZafGWv9EnYb0RrjRs49r-TlzV3PPQOjs,2109
+numpy/random/__pycache__/__init__.cpython-312.pyc,,
+numpy/random/__pycache__/_pickle.cpython-312.pyc,,
+numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so,sha256=L4ZvVgABToQOllsfbzYZWSS7YX_lAQy0lDLecAl5tPQ,323168
+numpy/random/_bounded_integers.pxd,sha256=SH_FwJDigFEInhdliSaNH2H2ZIZoX02xYhNQA81g2-g,1678
+numpy/random/_bounded_integers.pyi,sha256=juqd9PbXs4yg45zMJ7BHAOPQjb7sgEbWE9InBtGZhfo,24
+numpy/random/_common.cpython-312-x86_64-linux-gnu.so,sha256=3CWDfhtTpkNvsgJa13OYYCEMwR6sx5jlDp5f_tvr9hc,258912
+numpy/random/_common.pxd,sha256=7kGArYkBcemrxJcSttwvtDGbimLszdQnZdNvPMgN5xQ,4982
+numpy/random/_common.pyi,sha256=02dQDSAflunmZQFWThDLG3650py_DNqCmxjmkv5_XpA,421
+numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc,,
+numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc,,
+numpy/random/_examples/cffi/extending.py,sha256=jpIL1njMhf0nehmlMHkgZkIxns2JC9GEDYgAChX87G8,884
+numpy/random/_examples/cffi/parse.py,sha256=PK9vdUxwmvdnFvH3rOpgnnpISwnid7ri5XOmBrMWpJw,1750
+numpy/random/_examples/cython/extending.pyx,sha256=ePnHDNfMQcTUzAqgFiEqrTFr9BoDmbqgjxzrDLvV8fE,2267
+numpy/random/_examples/cython/extending_distributions.pyx,sha256=ahvbdSuRj35DKJRaNFP5JDuPqveBBp-M9mFfF3Wd_M4,3866
+numpy/random/_examples/cython/meson.build,sha256=GxZZT_Lu3nZsgcqo_7sTR_IdMJaHA1fxyjwrQTcodPs,1694
+numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc,,
+numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc,,
+numpy/random/_examples/numba/extending.py,sha256=Z7Z_Xp7HPE4K5BZ7AwpZ29qvuftFAkvhMtNX53tlMMw,1959
+numpy/random/_examples/numba/extending_distributions.py,sha256=fdePXeUj46yXK0MK1cszxUHQiOTiNuNsrbZqPw4AdGs,2036
+numpy/random/_generator.cpython-312-x86_64-linux-gnu.so,sha256=uTLfyCVXI_zU_LKEaTNhjWZx_2eqTWJpaC5gsy6uSU4,993240
+numpy/random/_generator.pyi,sha256=aFPqfOxIpOIOmdY1xBcUpllMCv20iTq4PN7Ad_gd7HY,24009
+numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so,sha256=drlMZ3K1OmM6C82Is9cE7_SM-aX3jXxDpsGlA-p7drU,137960
+numpy/random/_mt19937.pyi,sha256=ZjOCfOQb1KLDywy8ZHy8pQb1C-DZvStqYK3OOB6rETo,775
+numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so,sha256=1XcPLj1ve8oR2oftDtkteLUYQ1wbe7xTaK5GbYfxg4U,148272
+numpy/random/_pcg64.pyi,sha256=bIlGJyN2X3gtKEzh6qwDdyXX88al_2vVmCzGNpbNifs,1142
+numpy/random/_philox.cpython-312-x86_64-linux-gnu.so,sha256=lN4_kLuxiAFBkV5qWDkkMN0bcjNpdtimbDDDdWG-fXg,120808
+numpy/random/_philox.pyi,sha256=xFogUASfSHdviqexIf4bGgkzbryir7Tik7z0XQR9xx4,1005
+numpy/random/_pickle.py,sha256=Lt47ma_vnnJHdnQlc5jZ_DqBHsdKi0QiUNaIkMf95qA,2742
+numpy/random/_pickle.pyi,sha256=5obQY7CZRLMDjOgRtNgzV_Bg5O9E8DK_G74j7J7q6qo,1608
+numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so,sha256=72W67_gd0OXh2WB3ReFF8oUA0uetpMwsK-rEcbiJsjA,89648
+numpy/random/_sfc64.pyi,sha256=wRrbkEGLNhjXa7-LyGNtO5El9c8B_hNRQqF0Kmv6hQM,682
+numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so,sha256=YodrCvuRmoDRD083dQb-o-yz6H1gbFMPByouT66Dszc,239072
+numpy/random/bit_generator.pxd,sha256=lArpIXSgTwVnJMYc4XX0NGxegXq3h_QsUDK6qeZKbNc,1007
+numpy/random/bit_generator.pyi,sha256=tX5lVJDp6J5bNzflo-1rNylceD30oDBYtbiYVA1cWOY,3604
+numpy/random/c_distributions.pxd,sha256=UCtqx0Nf-vHuJVaqPlLFURWnaI1vH-vJRE01BZDTL9o,6335
+numpy/random/lib/libnpyrandom.a,sha256=0hYJRONXaBW-JS5wptgs-kvXtPhgwBlh7nhh3JVFxho,71702
+numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so,sha256=RBeIO7oWpwHHpJc18NBxPEBn9iQBMXgefhhAy2R88vU,785752
+numpy/random/mtrand.pyi,sha256=Ds2d-DloxUUE2wNNMA1w6oqqPsgBilkaRMCLioBTiJA,22687
+numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/random/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_direct.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_extending.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_random.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_regression.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc,,
+numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc,,
+numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc,,
+numpy/random/tests/data/generator_pcg64_np121.pkl.gz,sha256=EfQ-X70KkHgBAFX2pIPcCUl4MNP1ZNROaXOU75vdiqM,203
+numpy/random/tests/data/generator_pcg64_np126.pkl.gz,sha256=fN8deNVxX-HELA1eIZ32kdtYvc4hwKya6wv00GJeH0Y,208
+numpy/random/tests/data/mt19937-testset-1.csv,sha256=Xkef402AVB-eZgYQkVtoxERHkxffCA9Jyt_oMbtJGwY,15844
+numpy/random/tests/data/mt19937-testset-2.csv,sha256=nsBEQNnff-aFjHYK4thjvUK4xSXDSfv5aTbcE59pOkE,15825
+numpy/random/tests/data/pcg64-testset-1.csv,sha256=xB00DpknGUTTCxDr9L6aNo9Hs-sfzEMbUSS4t11TTfE,23839
+numpy/random/tests/data/pcg64-testset-2.csv,sha256=NTdzTKvG2U7_WyU_IoQUtMzU3kEvDH39CgnR6VzhTkw,23845
+numpy/random/tests/data/pcg64dxsm-testset-1.csv,sha256=vNSUT-gXS_oEw_awR3O30ziVO4seNPUv1UIZ01SfVnI,23833
+numpy/random/tests/data/pcg64dxsm-testset-2.csv,sha256=uylS8PU2AIKZ185OC04RBr_OePweGRtvn-dE4YN0yYA,23839
+numpy/random/tests/data/philox-testset-1.csv,sha256=SedRaIy5zFadmk71nKrGxCFZ6BwKz8g1A9-OZp3IkkY,23852
+numpy/random/tests/data/philox-testset-2.csv,sha256=dWECt-sbfvaSiK8-Ygp5AqyjoN5i26VEOrXqg01rk3g,23838
+numpy/random/tests/data/sfc64-testset-1.csv,sha256=iHs6iX6KR8bxGwKk-3tedAdMPz6ZW8slDSUECkAqC8Q,23840
+numpy/random/tests/data/sfc64-testset-2.csv,sha256=FIDIDFCaPZfWUSxsJMAe58hPNmMrU27kCd9FhCEYt_k,23833
+numpy/random/tests/data/sfc64_np126.pkl.gz,sha256=MVa1ylFy7DUPgUBK-oIeKSdVl4UYEiN3AZ7G3sdzzaw,290
+numpy/random/tests/test_direct.py,sha256=-ugW0cpuYhFSGVDtAbpEy_uFk-cG0JKFpPpQMDyFJh4,19919
+numpy/random/tests/test_extending.py,sha256=8KgkOAbxrgU9_cj9Qm0F8r9qVEVy438Q-Usp7_HpSLQ,4532
+numpy/random/tests/test_generator_mt19937.py,sha256=X0AEzi3xy6FzyTpTZNT_lXyXS_LWOWFYc9nZ6QtkILQ,117812
+numpy/random/tests/test_generator_mt19937_regressions.py,sha256=QZVFTSN9gnJXN-ye89JfUoov1Cu65r4e32FMmCYje5U,8107
+numpy/random/tests/test_random.py,sha256=YSlHTwu6t7BAjDLZrBz4e8-ynSuV6eOHP9NwxDoZBvU,70298
+numpy/random/tests/test_randomstate.py,sha256=WbZBpZplBlgmhWKXNsj7d0Zw0BHJ2nxEerMRnuwyYnE,85749
+numpy/random/tests/test_randomstate_regression.py,sha256=1NgkJ60dVg8-UZ-ApepKlZGotqgenW_vZ3jqofMOSlw,8010
+numpy/random/tests/test_regression.py,sha256=DqqLLE3_MW04ltPhSXy44oFx_daO9b4I7NgI-WoMc-s,5471
+numpy/random/tests/test_seed_sequence.py,sha256=0lb4LRofbt_wHO-Cs_d1hwp1WcWjOmxH-OePkXST5bc,3310
+numpy/random/tests/test_smoke.py,sha256=epkUF47HanaSZVz9NVUt6xUmKZhJNolPIB-z4MN67Qw,28141
+numpy/rec/__init__.py,sha256=kNAYYoSAA0izpUVRb-18sJw-iKtFq2Rl2U5SOH3pHRM,83
+numpy/rec/__init__.pyi,sha256=1ZL2SbvFSaoXwOK-378QQ0g0XldOjskx2E2uIerEGUI,347
+numpy/rec/__pycache__/__init__.cpython-312.pyc,,
+numpy/strings/__init__.py,sha256=o27wHW8jGaUfbDopSyEmYD6Rjeo33AzkGBBTgWrlGH4,83
+numpy/strings/__init__.pyi,sha256=JP8YQR3xZ_mPMdQax7QSR2cZ-N-V7ZDqvOcWIIUP_54,1319
+numpy/strings/__pycache__/__init__.cpython-312.pyc,,
+numpy/testing/__init__.py,sha256=Eqe-Ox-3JSqk6QRnnPPFLCW9Ikqv9OuJDhnm2uGM3zc,581
+numpy/testing/__init__.pyi,sha256=1jr2Gj9BmCdtK4bqNGkwUAuqwC4n2JPOy6lqczK7xpA,2045
+numpy/testing/__pycache__/__init__.cpython-312.pyc,,
+numpy/testing/__pycache__/overrides.cpython-312.pyc,,
+numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc,,
+numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/testing/_private/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/testing/_private/__pycache__/__init__.cpython-312.pyc,,
+numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc,,
+numpy/testing/_private/__pycache__/utils.cpython-312.pyc,,
+numpy/testing/_private/extbuild.py,sha256=Lg1sqA94Q74Ki5u-sx0PEj7urr3YP470-BCiyvJwExQ,7716
+numpy/testing/_private/extbuild.pyi,sha256=aNH6UnAhh4Zny81W45GrAcScB12b6_84y8M0Vdtpm2I,626
+numpy/testing/_private/utils.py,sha256=PZFbAxTSOPFQ_VXMaDCgPFBSEk2kcEGh8GRiBJy_yJg,95707
+numpy/testing/_private/utils.pyi,sha256=9xlm7AQwi1yqOZN_t22jI_G9Ov-0tzX5H0ITHVz0UEE,12943
+numpy/testing/overrides.py,sha256=B8Y8PlpvK71IcSuoubXWj4L5NVmLVSn7WMg1L7xZO8k,2134
+numpy/testing/overrides.pyi,sha256=IQvQLxD-dHcbTQOZEO5bnCtCp8Uv3vj51dl0dZ0htjg,397
+numpy/testing/print_coercion_tables.py,sha256=SboNmCLc5FyV-UR8gKjJc2PIojN1XQTvH0WzDq75M2M,6286
+numpy/testing/print_coercion_tables.pyi,sha256=FRNibMYi0OyLIzKD4RUASZyhlsTY8elN0Q3jcBPEdgE,821
+numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/testing/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc,,
+numpy/testing/tests/test_utils.py,sha256=yb2RpPDZvVagXiwQPFhV2IhwslZRkC-d-Vtb5wbJbbo,69575
+numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/tests/__pycache__/test__all__.cpython-312.pyc,,
+numpy/tests/__pycache__/test_configtool.cpython-312.pyc,,
+numpy/tests/__pycache__/test_ctypeslib.cpython-312.pyc,,
+numpy/tests/__pycache__/test_lazyloading.cpython-312.pyc,,
+numpy/tests/__pycache__/test_matlib.cpython-312.pyc,,
+numpy/tests/__pycache__/test_numpy_config.cpython-312.pyc,,
+numpy/tests/__pycache__/test_numpy_version.cpython-312.pyc,,
+numpy/tests/__pycache__/test_public_api.cpython-312.pyc,,
+numpy/tests/__pycache__/test_reloading.cpython-312.pyc,,
+numpy/tests/__pycache__/test_scripts.cpython-312.pyc,,
+numpy/tests/__pycache__/test_warnings.cpython-312.pyc,,
+numpy/tests/test__all__.py,sha256=AXbT9VmRSTYq9beba4d1Eom_V9SVXXEtpkBdEW2XCqU,222
+numpy/tests/test_configtool.py,sha256=aVO9XZPUq-T0LXFx-sQbtsOcnwKIFnpKtfuWWlnWDFs,1749
+numpy/tests/test_ctypeslib.py,sha256=RNTHi3cYOEPQno5zZQ_WyekW5E_0bVuwmn1AFgkDzY8,12375
+numpy/tests/test_lazyloading.py,sha256=mMbie5VOu7S4uQBu66RNA2ipSsAY4C0lyoJXeHclAvk,1160
+numpy/tests/test_matlib.py,sha256=RMduSGHBJuVFmk__Ug_hVeGD4-Y3f28G0tlDt8F7k7c,1854
+numpy/tests/test_numpy_config.py,sha256=y4U3wnNW0Ags4W_ejhQ4CRCPnBc9p-4-B9OFDcLq9fg,1235
+numpy/tests/test_numpy_version.py,sha256=6PIeISx9_Hglpxc3y6KugeAgB4eBkuZC-DFlXt4LocA,1744
+numpy/tests/test_public_api.py,sha256=KqMtjIjq0_lp2ag4FTtulzypCqyZ43kuUlXgzd_Vkxc,27851
+numpy/tests/test_reloading.py,sha256=T0NTsxAZFPY0LuAzbsy0wV_vSIZON7dwWSNjz_yzpDg,2367
+numpy/tests/test_scripts.py,sha256=QpjsWc0vgi-IFLdMr81horvHAnjRI7RhYyO-edHxzcU,1665
+numpy/tests/test_warnings.py,sha256=ynGuW4FOgjLcwdyi5AYCGCrmAu7jZlIQWPNK-0Yr800,2328
+numpy/typing/__init__.py,sha256=FdaIH47j8uGEA5luTu-DnrOOTFw-3ne2JVHe-yn_7bA,6048
+numpy/typing/__pycache__/__init__.cpython-312.pyc,,
+numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc,,
+numpy/typing/mypy_plugin.py,sha256=1pcfLxJaYFdCPKQJVwHvdYbZSVdZ7RSIcg1QXHR7nqM,6541
+numpy/typing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/typing/tests/__pycache__/__init__.cpython-312.pyc,,
+numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc,,
+numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc,,
+numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc,,
+numpy/typing/tests/data/fail/arithmetic.pyi,sha256=B1iRvZyWH_beZWBFsUtF6aHtF99VBjWpHQya6IyY1o8,3690
+numpy/typing/tests/data/fail/array_constructors.pyi,sha256=2917vcb59EaV406oGtA9lSQ8n_KDDyfvMrxj1Na6rPM,1200
+numpy/typing/tests/data/fail/array_like.pyi,sha256=klBpaBcONODcPC1LztdVZ3UuIPwXN8kUK_e9s_QnZoo,496
+numpy/typing/tests/data/fail/array_pad.pyi,sha256=mt-nhrs6f4YP7xgXizti7k_AwFAJ50yK97fMrMAAEds,137
+numpy/typing/tests/data/fail/arrayprint.pyi,sha256=i-0R4ExF_gtfXML5qirbsQLmDwJyg6_37HDYsk6g6tI,616
+numpy/typing/tests/data/fail/arrayterator.pyi,sha256=9Y8aD2lkDO7UcLo9ySR0pnVz0p2ofl2Lq4XTHgoMXxA,463
+numpy/typing/tests/data/fail/bitwise_ops.pyi,sha256=3k-IDummVmwkdm3IVIPWCFp4e48TSTyZJKv1lDoPpvs,399
+numpy/typing/tests/data/fail/char.pyi,sha256=IrAk7rxT3ixBVwNWHzCWEsW9rF_oCXTOtwIG-q_Vx2A,2800
+numpy/typing/tests/data/fail/chararray.pyi,sha256=aTqYSgwQUGUVUDkOly_Dy5SdOiHdKKTEbo6jD68KaH0,2356
+numpy/typing/tests/data/fail/comparisons.pyi,sha256=zscovvsL89W8wrL43k4z8z1DLrjXmxBbu6lAOpiyhp0,750
+numpy/typing/tests/data/fail/constants.pyi,sha256=OHaBJo6GYW94BlUWKQDat5jiit5ZAy_h-5bb1WUJnLU,78
+numpy/typing/tests/data/fail/datasource.pyi,sha256=7om31_WCptsSn_z7E5p-pKFlswZItyZm9GQ-L5fXWqM,419
+numpy/typing/tests/data/fail/dtype.pyi,sha256=crqAVZmBYLYV9N-ihiOnKCY1QK4iTxX7J4Tviac2Vq4,305
+numpy/typing/tests/data/fail/einsumfunc.pyi,sha256=4QWkwE4sr5bKz5-OCjPzeUcr4V-wpaMsqee2PXDwyjw,458
+numpy/typing/tests/data/fail/flatiter.pyi,sha256=3WblzrQewUBZo1mjTRWzwda_rQ0HSVamtz2XwH2CgCc,715
+numpy/typing/tests/data/fail/fromnumeric.pyi,sha256=eWCbs1dcoJraAA3b5qME09sWWvsSdlDO912_OwQ_M7k,5685
+numpy/typing/tests/data/fail/histograms.pyi,sha256=wgI2CG-P0jbDw4KohR_nbJxqa34PT1e6nmnLi9KbPQM,376
+numpy/typing/tests/data/fail/index_tricks.pyi,sha256=RNZLHeMOpSX594Eem4WyJrM_QouqndGRVj2YQakJN-E,517
+numpy/typing/tests/data/fail/lib_function_base.pyi,sha256=JdvdZlgNUNzlOuY74T6Lt_hNOpveU6U1jhFGB9Iu6ZA,2817
+numpy/typing/tests/data/fail/lib_polynomial.pyi,sha256=Y3jlwigvtr5tFEHvr3SgguMVsYZe8cvsdgKcavgfucs,937
+numpy/typing/tests/data/fail/lib_utils.pyi,sha256=6Oc_wYI0mv0l74p4pEiVtKjkWFNg4WmXjGW7_2zEKS4,98
+numpy/typing/tests/data/fail/lib_version.pyi,sha256=BvABs2aeC6ZHUGkrsowu80Ks22pbxUMwSPJ8c5i7H14,154
+numpy/typing/tests/data/fail/linalg.pyi,sha256=h9bcCeP0ARGONB3iYGkdX-BFPsNI-pZq3C-nfKgbbBU,1381
+numpy/typing/tests/data/fail/ma.pyi,sha256=inPaC4jP7hGPqQJn-rBspeJZnxJz7m1nVDYQxuMI8SE,6364
+numpy/typing/tests/data/fail/memmap.pyi,sha256=uXPLcVx726Bbw93E5kdIc7K0ssvLIZoJfNTULMtAa_8,169
+numpy/typing/tests/data/fail/modules.pyi,sha256=mEBLIY6vZAPIf2BuyJcMAR-FarSkT55FRlusrsR0qCo,603
+numpy/typing/tests/data/fail/multiarray.pyi,sha256=lSV5JiLNz-CxHUlNbF1Bq3x7mOftfr1kiiG2DgtXilE,1656
+numpy/typing/tests/data/fail/ndarray.pyi,sha256=65IDiOprlv-sg375SBmmh6_hYOzlucTVLe42GymniGM,381
+numpy/typing/tests/data/fail/ndarray_misc.pyi,sha256=hSdxKyxweyxAH32DGa_ZnZIXqPNh6CafBK90rjbi8cs,1061
+numpy/typing/tests/data/fail/nditer.pyi,sha256=nRbQ66HcoKXDKIacbWD9pTq-523GJOqxzJ3r278lDsc,319
+numpy/typing/tests/data/fail/nested_sequence.pyi,sha256=jGjoQhCLr8dbTCPvWkilJKAW0RRMbrY-iEHf24Happo,463
+numpy/typing/tests/data/fail/npyio.pyi,sha256=vPYmFaPCFbr5V2AC3074w8hTCBUYxpSF4fi1sbbfopw,646
+numpy/typing/tests/data/fail/numerictypes.pyi,sha256=NJxviXTJIaDoz7q56dBrHCBNNG-doTu-oIryzwURxHQ,124
+numpy/typing/tests/data/fail/random.pyi,sha256=IvKXQuxZhuK6M0u2x3Y4PhXvLoC8OFnUdoeneaqDiIE,2903
+numpy/typing/tests/data/fail/rec.pyi,sha256=eeFXVvVg4DherRMA1T8KERtTiRN1ZIbarw4Yokb8WrU,741
+numpy/typing/tests/data/fail/scalars.pyi,sha256=EQy8ovBZX49a7AgtRyI8uHgauQoVzAmjE3NALe97tEw,2849
+numpy/typing/tests/data/fail/shape.pyi,sha256=VNucLx9ittp1a0AOyVPd6XKfERm0kq_ND1lOr-LXQ_s,131
+numpy/typing/tests/data/fail/shape_base.pyi,sha256=8366-8mCNii1D0W6YrOhCNxo8rrfqQThO-dIVWNRHvA,157
+numpy/typing/tests/data/fail/stride_tricks.pyi,sha256=g7-DY8Zc8pzTDyOBA-8t6yIFj1FZI9XpvVdbybQN2i0,330
+numpy/typing/tests/data/fail/strings.pyi,sha256=wX9ROrRNhpH9g_ewNGjWuTKU-He4xaNxrtz2Dm3iPo8,2333
+numpy/typing/tests/data/fail/testing.pyi,sha256=m8d2OZZ1DtsHfmnTwvdMRETUfo0lwRzaOXjuyNi08PQ,1399
+numpy/typing/tests/data/fail/twodim_base.pyi,sha256=ROt5iqOp9ENbXlMEG8dzUZxHD3N4lwcbyCffuJ4BLZE,936
+numpy/typing/tests/data/fail/type_check.pyi,sha256=hRXyE4Ywx6zjtSgiHwKRs4k47M4hnPjj7yjVhi91IaU,397
+numpy/typing/tests/data/fail/ufunc_config.pyi,sha256=v5rd68Y2TzLplIOaOXM4h66HqSv8XbapR0b3xaoUOdQ,589
+numpy/typing/tests/data/fail/ufunclike.pyi,sha256=ejCb6kb7mmxPH0QrDsYfdFSLLPFKx0IZ9xSLs3YXOzg,649
+numpy/typing/tests/data/fail/ufuncs.pyi,sha256=XBoxO597ponBkFcCfwCS3s-jKfcnDzC_K5n2uBPrD6E,505
+numpy/typing/tests/data/fail/warnings_and_errors.pyi,sha256=SoFIznFd_xDifIsS0pv0aqS2BvhZaT6xsOA0zJrRJkA,200
+numpy/typing/tests/data/misc/extended_precision.pyi,sha256=n1nzRzRa_oKDdNExxB0qRIQr8MeDIosbLU6Vpgi6ZYo,322
+numpy/typing/tests/data/mypy.ini,sha256=rfUCMP01SsfRLJ-MRGEicI9XW-HJDoTJ_ncaACuKJ0s,245
+numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/array_like.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/dtype.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/literal.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/ma.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/mod.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/modules.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/nditer.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/numeric.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/random.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/scalars.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/shape.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/simple.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-312.pyc,,
+numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-312.pyc,,
+numpy/typing/tests/data/pass/arithmetic.py,sha256=t4UK-TROh0uYPlUNn5CZHdTysECmDZa04uUOCZO58cY,7762
+numpy/typing/tests/data/pass/array_constructors.py,sha256=rfJ8SRB4raElxRjsHBCsZIkZAfqZMie0VE8sSKMgkHg,2447
+numpy/typing/tests/data/pass/array_like.py,sha256=-wTiw2o_rLw1aeT7FSh60RKguhvxKyr_Vv5XNXTYeS4,1032
+numpy/typing/tests/data/pass/arrayprint.py,sha256=y_KkuLz1uM7pv53qfq7GQOuud4LoXE3apK1wtARdVyM,766
+numpy/typing/tests/data/pass/arrayterator.py,sha256=FqcpKdUQBQ0FazHFxr9MsLEZG-jnJVGKWZX2owRr4DQ,393
+numpy/typing/tests/data/pass/bitwise_ops.py,sha256=FmEs_sKaU9ox-5f0NU3_TRIv0XxLQVEZ8rou9VNehb4,964
+numpy/typing/tests/data/pass/comparisons.py,sha256=5aGrNl3D7Yd1m9WVkHrjJtqi7SricTxrEMtmIV9x0aE,3298
+numpy/typing/tests/data/pass/dtype.py,sha256=YDuYAb0oKoJc9eOnKJuoPfLbIKOgEdE04_CYxRS4U5I,1070
+numpy/typing/tests/data/pass/einsumfunc.py,sha256=eXj5L5MWPtQHgrHPsJ36qqrmBHqct9UoujjJCvHnF1k,1370
+numpy/typing/tests/data/pass/flatiter.py,sha256=tpKL_EAjkJoCZ5C0iuIX0dNCwQ9wUq1XlBMP-n2rjM4,203
+numpy/typing/tests/data/pass/fromnumeric.py,sha256=d_hVLyrVDFPVx33aqLIyAGYYQ8XAJFIzrAsE8QCoof4,3991
+numpy/typing/tests/data/pass/index_tricks.py,sha256=dmonWJMUKsXg23zD_mibEEtd4b5ys-sEfT9Fnnq08x8,1402
+numpy/typing/tests/data/pass/lib_user_array.py,sha256=Za_n84msWtV8dqQZhMhvh7lzu5WZvO8ixTPkEqO2Hms,590
+numpy/typing/tests/data/pass/lib_utils.py,sha256=bj1sEA4gsmezqbYdqKnVtKzY_fb64w7PEoZwNvaaUdA,317
+numpy/typing/tests/data/pass/lib_version.py,sha256=HnuGOx7tQA_bcxFIJ3dRoMAR0fockxg4lGqQ4g7LGIw,299
+numpy/typing/tests/data/pass/literal.py,sha256=HSG-2Gf7J5ax3mjTOeh0pAYUrVOqboTkrt2m6ssfqVY,1508
+numpy/typing/tests/data/pass/ma.py,sha256=ZIi85AwntBX7M1LIvl4yEGixAauHAS2GINBR42Ri4Hw,3362
+numpy/typing/tests/data/pass/mod.py,sha256=owFL1fys3LPTWpAlsjS-IzW4sSu98ncp2BnsIetLSrA,1576
+numpy/typing/tests/data/pass/modules.py,sha256=g9PhyLO6rflYHZtmryx1VWTubphN4TAPUSfoiYriTqE,625
+numpy/typing/tests/data/pass/multiarray.py,sha256=MxHax6l94yqlTVZleAqG77ILEbW6wU5osPcHzxJ85ns,1331
+numpy/typing/tests/data/pass/ndarray_conversion.py,sha256=d7cFNUrofdLXh9T_9RG3Esz1XOihWWQNlz5Lb0yt6dM,1525
+numpy/typing/tests/data/pass/ndarray_misc.py,sha256=wBbQDHcpiIlMl-z5ToVOrFpoxrqXQMBq1dFSWfwGJNE,3699
+numpy/typing/tests/data/pass/ndarray_shape_manipulation.py,sha256=37eYwMNqMLwanIW9-63hrokacnSz2K_qtPUlkdpsTjo,640
+numpy/typing/tests/data/pass/nditer.py,sha256=nYO45Lw3ZNbQq75Vht86zzLZ4cWzP3ml0rxDPlYt8_8,63
+numpy/typing/tests/data/pass/numeric.py,sha256=pOwxnmZmdCtDKh9ih0h5GFIUPJwsi97NBs1y5ZAGyUM,1622
+numpy/typing/tests/data/pass/numerictypes.py,sha256=6x6eN9-5NsSQUSc6rf3fYieS2poYEY0t_ujbwgF9S5Q,331
+numpy/typing/tests/data/pass/random.py,sha256=UJF6epKYGfGq9QlrR9YuA7EK_mI8AQ2osdA4Uhsh1ms,61824
+numpy/typing/tests/data/pass/recfunctions.py,sha256=GwDirrHsL3upfIsAEZakPt95-RLY7BpXqU_KXxi4HhQ,5003
+numpy/typing/tests/data/pass/scalars.py,sha256=pzV3Y20dd6xB9NRsJ0YSdkcvI5XcD8cEWtEo1KTL1SU,3724
+numpy/typing/tests/data/pass/shape.py,sha256=L2iugxTnbm8kmBpaJVYpURKJEAnI7TH2KtuYeqNR9co,445
+numpy/typing/tests/data/pass/simple.py,sha256=lPj620zkTA8Sg893eu2mGuj-Xq2BGZ_1dcmfsVDkz8g,2751
+numpy/typing/tests/data/pass/simple_py3.py,sha256=HuLrc5aphThQkLjU2_19KgGFaXwKOfSzXe0p2xMm8ZI,96
+numpy/typing/tests/data/pass/ufunc_config.py,sha256=uzXOhCl9N4LPV9hV2Iqg_skgkKMbBPBF0GXPU9EMeuE,1205
+numpy/typing/tests/data/pass/ufunclike.py,sha256=U4Aay11VALvm22bWEX0eDWuN5qxJlg_hH5IpOL62M3I,1125
+numpy/typing/tests/data/pass/ufuncs.py,sha256=1Rem_geEm4qyD3XaRA1NAPKwr3YjRq68zbIlC_Xhi9M,422
+numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=ETLZkDTGpZspvwjVYAZlnA1gH4PJ4bSY5PkWyxTjusU,161
+numpy/typing/tests/data/reveal/arithmetic.pyi,sha256=phWM8Fz30fe--KkKI8S9voIbDNHbxIKSzLwRWwvJ7yU,27424
+numpy/typing/tests/data/reveal/array_api_info.pyi,sha256=oWKW0yGS9xKcLZnH2QeeixMBcI74dNIcwZr0bwGmDVM,3017
+numpy/typing/tests/data/reveal/array_constructors.pyi,sha256=fJZwsHVQS-_sEMo6qsLKyKyxuQoGvCeW8TF3xUzv_rw,13041
+numpy/typing/tests/data/reveal/arraypad.pyi,sha256=Dg5ss1cDS_QiNT4YEheHXMa2beM4qBTUb1mq-REkh6A,653
+numpy/typing/tests/data/reveal/arrayprint.pyi,sha256=iUHzZaUrYFGC9QBCxhiEAIJODeqGwG7VCv875il-9gY,777
+numpy/typing/tests/data/reveal/arraysetops.pyi,sha256=Hhe49rLgj0P8SXElncNvLeCv1OqdI-iryB_673w7vL4,4411
+numpy/typing/tests/data/reveal/arrayterator.pyi,sha256=QPRyZzHFmti4HlrJ315dgzBjaet8LqM9il-8uc9e2P8,1039
+numpy/typing/tests/data/reveal/bitwise_ops.pyi,sha256=TjW0vMyXqUy-WoEIMA3AMN_u4IGw5RosOWK_qHMNjes,4911
+numpy/typing/tests/data/reveal/char.pyi,sha256=9QbiMbkKycnZl4f4eKBoF_rAxIUIv3vBcOQyksHJCug,11470
+numpy/typing/tests/data/reveal/chararray.pyi,sha256=4oqRNZt7jIdfbNVgcsWPDVVFrrEYhqjAExaNzPya_lY,5199
+numpy/typing/tests/data/reveal/comparisons.pyi,sha256=mXRfm3ZUsk8YbSPg9ugPSWLGRwzUVy4BEVN7q4K56tc,7195
+numpy/typing/tests/data/reveal/constants.pyi,sha256=AazwlvF--Te1dt35f8lkDLNuo3jQXqmGvddDQ37jAE0,333
+numpy/typing/tests/data/reveal/ctypeslib.pyi,sha256=U9ZO5GnGHxVyv-OWRYWHSXctH7LGHPWDdyNVl_saQEQ,4134
+numpy/typing/tests/data/reveal/datasource.pyi,sha256=B9nCoOPE4fJvBIeInAgUCg5pIsr8IYOu_iToqt6n-Nc,583
+numpy/typing/tests/data/reveal/dtype.pyi,sha256=IdxNE3NIE0YKpVw4yI9lS-wWPmeFyfGCW2V0oyor4zk,5080
+numpy/typing/tests/data/reveal/einsumfunc.pyi,sha256=qPYk5W3lardDdgsQIGyu356iIGDnb0P38UKQDXWQlrk,1926
+numpy/typing/tests/data/reveal/emath.pyi,sha256=fcf0-GftYRByfJFuZC-MvzHlQU4A-f9-kPnxzQt48E0,2125
+numpy/typing/tests/data/reveal/fft.pyi,sha256=uZOJ0ljmmnejfPEwMsfUGDb52NOuTh7Npl7ONwx-Y2k,1601
+numpy/typing/tests/data/reveal/flatiter.pyi,sha256=ZxgdgbRWYXlyxlPOXJzZSHvALqGsK3aV4lf9RePghdA,1347
+numpy/typing/tests/data/reveal/fromnumeric.pyi,sha256=xweKmm6uKVgJF4-AwtM6hGEI_YHosu-8jXnd8yjSfJ4,15066
+numpy/typing/tests/data/reveal/getlimits.pyi,sha256=mH0kk94VBu-O5ZzA1nki80jttDK_EBGOsLQOZo3Rq18,1547
+numpy/typing/tests/data/reveal/histograms.pyi,sha256=Mr7P7JYMWF9jM6w5othyzh8CN3ygd2A-WRoB4jImnzk,1257
+numpy/typing/tests/data/reveal/index_tricks.pyi,sha256=4dvG8RXY5ktKXo1uC_pfPHXBDd7tatTbjCs8xr8M2os,3241
+numpy/typing/tests/data/reveal/lib_function_base.pyi,sha256=LMCyduuUjX1E7ruBI-B_cEJQ_rUt9ZO21ck22_OLa_c,10112
+numpy/typing/tests/data/reveal/lib_polynomial.pyi,sha256=CrG0zxbY-HddD7D93q5Cow6c_3mx3nVb1ZCcAq5mC4U,5660
+numpy/typing/tests/data/reveal/lib_utils.pyi,sha256=oQCay2NF8pYHD5jNgRZKNjn8uJW4TJqUPIlytOwDSi0,436
+numpy/typing/tests/data/reveal/lib_version.pyi,sha256=y4ZJSLEeS273Zd6fqaE2XNdczTS0-cwIJ2Yn_4Otm44,572
+numpy/typing/tests/data/reveal/linalg.pyi,sha256=w8RdvwTSt40PMQDvlt_tnky4Cu9LnTUXAmdFhZORPpc,5933
+numpy/typing/tests/data/reveal/ma.pyi,sha256=5FCR2aqUpKOtoQcazro_5C-NE2MrywouDrMHirVyHF0,16223
+numpy/typing/tests/data/reveal/matrix.pyi,sha256=ntknd4qkGbaBMMzPlkTeahyg_H8_TDBJQDbd36a_QfY,3040
+numpy/typing/tests/data/reveal/memmap.pyi,sha256=OCcEhR5mvvXk4UhF6lRqmkxU2NcAqJ4nqAuBpcroQ1g,719
+numpy/typing/tests/data/reveal/mod.pyi,sha256=9nJnn1rA_4mbk2JSYyDmQ5pMWWQ9vPDDzWqijlFAG4I,7599
+numpy/typing/tests/data/reveal/modules.pyi,sha256=_Gvxgql5KbJFL1Mj5gFAphzyGC44AkuNZLnYkv-3LRA,1858
+numpy/typing/tests/data/reveal/multiarray.pyi,sha256=oz81sV4JUBbd6memodStUpT11TARzqRXWUs4H0cU-YA,7779
+numpy/typing/tests/data/reveal/nbit_base_example.pyi,sha256=9OqWKUGRGCIt-mywzDmZExTOsM7l3JGw0YAPB9rs_8k,687
+numpy/typing/tests/data/reveal/ndarray_assignability.pyi,sha256=KOl5ActvtUx6h1oTQT3c0EiU5eCDbMD1okQVfxpc4j0,2668
+numpy/typing/tests/data/reveal/ndarray_conversion.pyi,sha256=SAI9kxMNl66L8n7kO3jn7-EL_3Ygn46behqD_dVa5Hw,3309
+numpy/typing/tests/data/reveal/ndarray_misc.pyi,sha256=8jwi9O-iGcojU0xSF_GUYMFRpkRdol5hQza0hkziNXc,8663
+numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi,sha256=z8SRTWdl6fSj_ENNF-M5jZnujUl1180WaFMAanXqCVw,1394
+numpy/typing/tests/data/reveal/nditer.pyi,sha256=yih7UE0OynR7GuVCGgwhzjTjARwOXikDe6Dr4ymRC2g,1898
+numpy/typing/tests/data/reveal/nested_sequence.pyi,sha256=Z2vwweUjoqxR0zUUldOUXsg6mkDfDP1BMyFV2hje5Z8,612
+numpy/typing/tests/data/reveal/npyio.pyi,sha256=p6jJFmcwXuQhshYC70zhg_itI1kLiDu9saUCNwYpFNo,3493
+numpy/typing/tests/data/reveal/numeric.pyi,sha256=0hvPN803QJoO38lYY68of7M-1KGXqdgHy9RdqcHwO-M,5869
+numpy/typing/tests/data/reveal/numerictypes.pyi,sha256=4lnZQTgVtig8UuDwuETyQ6jRFxsYv6tnni2ZJaDyMM0,1331
+numpy/typing/tests/data/reveal/polynomial_polybase.pyi,sha256=V7ulOvXuAcduWTD_7Jg1yPCLvROq8E-10GobfNlKXD8,7925
+numpy/typing/tests/data/reveal/polynomial_polyutils.pyi,sha256=I_4waxJEeUsp5pjnbBN55kqZ2kycK8akD_XvhsgsCGY,10642
+numpy/typing/tests/data/reveal/polynomial_series.pyi,sha256=YowKiIaDd2Je0PjEmXDINUXe4il0r4KDkpzDbYpwG38,6853
+numpy/typing/tests/data/reveal/random.pyi,sha256=xXJobSp5nVBelmrBO_OTvV8XQnbnZjbAyJfrRwlJshg,104296
+numpy/typing/tests/data/reveal/rec.pyi,sha256=E8lxkOQ4qSwwX20Y4d438s5g-kTnNARsZc4f-Y8OhZo,3378
+numpy/typing/tests/data/reveal/scalars.pyi,sha256=5s5Xm1HoA6bwwqK4gfEWqoNk45dAQvxAZLZc2zUhe3A,6378
+numpy/typing/tests/data/reveal/shape.pyi,sha256=ZT6e5LW4nU90tA-Av5NLiyoaPW9NIX_XkWJ-LOOzh84,262
+numpy/typing/tests/data/reveal/shape_base.pyi,sha256=xbnt0jps1djVxVMn4Lj8bxGl-mGvbhqSKFVWYcFApLg,2006
+numpy/typing/tests/data/reveal/stride_tricks.pyi,sha256=Cm9P_F7promu0zGZmo957SOFCZ6Np8wSv5ecR_hB668,1315
+numpy/typing/tests/data/reveal/strings.pyi,sha256=WvSd8xHIdxQdah3Q0ZJUva79jfVngB3UD9yb6awDW8w,9547
+numpy/typing/tests/data/reveal/testing.pyi,sha256=vP3uEWEdFHrfv_Q4OaJ0Oo5gUqUxkkIRVjvJMsqiHs8,8443
+numpy/typing/tests/data/reveal/twodim_base.pyi,sha256=TiBbWXI0xRCgk0bE-Bd4ZryWaLeJIQ5I-6KBjIVoMuE,4237
+numpy/typing/tests/data/reveal/type_check.pyi,sha256=W7rJUEf_iwI0D1FIVjhCEfzIjw_T04qcBYFxuPwnXAo,2392
+numpy/typing/tests/data/reveal/ufunc_config.pyi,sha256=XoD9fxaMVCGgyMncWKIJssFBO0SmndHsDs0hDXS04A8,1162
+numpy/typing/tests/data/reveal/ufunclike.pyi,sha256=0jwIYSgXn0usVGkzyZz0ttO5tSYfWMYu_U2ByqrzuRQ,1183
+numpy/typing/tests/data/reveal/ufuncs.pyi,sha256=2IYvfPlLCuqgoyNKzbcv3mr-Dva2cyUSWtBWuM77sDk,4789
+numpy/typing/tests/data/reveal/warnings_and_errors.pyi,sha256=5qqRFzPOon1GhU_i5CHDxQLPKVcO2EMhbc851V8Gusc,449
+numpy/typing/tests/test_isfile.py,sha256=yaRIX3JLmwY1cgD-xxKvJjMVVBRmv9QNSXx9kQSoVAc,878
+numpy/typing/tests/test_runtime.py,sha256=YHS0Hgv1v3cip7C14UcsJWLGI37m18MqXrwLmb88Ctc,2919
+numpy/typing/tests/test_typing.py,sha256=VERPf6NJ6gRLoKk0ki-s1wvDS4E--InjNUaj63_Q-00,6289
+numpy/version.py,sha256=AjMIRnThSIMGkfWDWXTf4QreFJAZR5RGTsjucRoRXx8,293
+numpy/version.pyi,sha256=x3oCrDqM_gQhitdDgfgMhJ-UPabIXk5etqBq8HUwUok,358
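[editor's note, not part of the patch] Each RECORD row above has the form
path,sha256=<digest>,<size in bytes>; per the wheel spec (PEP 376 / PEP 427)
the digest is the URL-safe base64 encoding of the file's SHA-256 hash with the
trailing "=" padding stripped. A minimal sketch for auditing a vendored file
(record_digest is a hypothetical helper, not something pip or numpy ships):

    import base64
    import hashlib
    from pathlib import Path

    def record_digest(path: Path) -> str:
        # SHA-256 over the raw bytes, then URL-safe base64 minus '=' padding,
        # matching the sha256=... column in RECORD.
        raw = hashlib.sha256(path.read_bytes()).digest()
        return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

    site = Path(".venv/lib/python3.12/site-packages")
    assert record_digest(site / "numpy/version.py") == "AjMIRnThSIMGkfWDWXTf4QreFJAZR5RGTsjucRoRXx8"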
diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/WHEEL
new file mode 100644
index 00000000..227a38cf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: meson
+Root-Is-Purelib: false
+Tag: cp312-cp312-manylinux_2_27_x86_64
+Tag: cp312-cp312-manylinux_2_28_x86_64
+
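[editor's note, not part of the patch] Root-Is-Purelib: false marks this as a
platform-specific wheel, and the two Tag lines say it targets CPython 3.12 on
manylinux_2_27 / manylinux_2_28 x86_64. A hedged sketch of the installer-side
compatibility check, using the third-party "packaging" library:

    from packaging.tags import parse_tag, sys_tags

    wheel_tags = parse_tag("cp312-cp312-manylinux_2_28_x86_64")
    supported = set(sys_tags())          # tags this interpreter/platform accepts
    print(bool(wheel_tags & supported))  # True -> the wheel is installable here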
diff --git a/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/entry_points.txt b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/entry_points.txt
new file mode 100644
index 00000000..48c4f643
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy-2.3.3.dist-info/entry_points.txt
@@ -0,0 +1,13 @@
+[pkg_config]
+numpy = numpy._core.lib.pkgconfig
+
+[array_api]
+numpy = numpy
+
+[pyinstaller40]
+hook-dirs = numpy:_pyinstaller_hooks_dir
+
+[console_scripts]
+f2py = numpy.f2py.f2py2e:main
+numpy-config = numpy._configtool:main
+
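[editor's note, not part of the patch] The [console_scripts] section is what
pip reads to generate the .venv/bin/f2py and .venv/bin/numpy-config wrapper
scripts: each "name = module:attr" line becomes an executable that imports the
module and calls the attribute. The same mapping is resolvable at runtime via
the standard library, e.g.:

    from importlib.metadata import entry_points

    (ep,) = entry_points(group="console_scripts", name="f2py")  # assumes one match
    main = ep.load()   # the numpy.f2py.f2py2e:main callable registered above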
diff --git a/.venv/lib/python3.12/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 b/.venv/lib/python3.12/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0
new file mode 100755
index 00000000..f00c303d
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 differ
diff --git a/.venv/lib/python3.12/site-packages/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 b/.venv/lib/python3.12/site-packages/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0
new file mode 100755
index 00000000..b6063a05
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 differ
diff --git a/.venv/lib/python3.12/site-packages/numpy.libs/libscipy_openblas64_-8fb3d286.so b/.venv/lib/python3.12/site-packages/numpy.libs/libscipy_openblas64_-8fb3d286.so
new file mode 100755
index 00000000..d6449096
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy.libs/libscipy_openblas64_-8fb3d286.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__config__.py b/.venv/lib/python3.12/site-packages/numpy/__config__.py
new file mode 100644
index 00000000..cbe4a4e2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/__config__.py
@@ -0,0 +1,170 @@
+# This file is generated by numpy's build process
+# It contains system_info results at the time of building this package.
+from enum import Enum
+from numpy._core._multiarray_umath import (
+ __cpu_features__,
+ __cpu_baseline__,
+ __cpu_dispatch__,
+)
+
+__all__ = ["show_config"]
+_built_with_meson = True
+
+
+class DisplayModes(Enum):
+ stdout = "stdout"
+ dicts = "dicts"
+
+
+def _cleanup(d):
+ """
+    Removes empty values in a `dict` recursively.
+    This ensures we remove values that Meson could not provide to CONFIG.
+ """
+ if isinstance(d, dict):
+ return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)}
+ else:
+ return d
+
+
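+# Editor's illustration (not part of the upstream file): _cleanup prunes falsy
+# leaves recursively, e.g. _cleanup({"a": {"b": ""}, "c": 1}) == {"c": 1}.
+# Relatedly, the bool("False".lower().replace("false", "")) idiom below turns
+# Meson's interpolated "True"/"False" strings into real booleans: "false"
+# becomes the empty (falsy) string, while "true" survives as a truthy one.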
+CONFIG = _cleanup(
+ {
+ "Compilers": {
+ "c": {
+ "name": "gcc",
+ "linker": r"ld.bfd",
+ "version": "14.2.1",
+ "commands": r"cc",
+ "args": r"",
+ "linker args": r"",
+ },
+ "cython": {
+ "name": "cython",
+ "linker": r"cython",
+ "version": "3.1.3",
+ "commands": r"cython",
+ "args": r"",
+ "linker args": r"",
+ },
+ "c++": {
+ "name": "gcc",
+ "linker": r"ld.bfd",
+ "version": "14.2.1",
+ "commands": r"c++",
+ "args": r"",
+ "linker args": r"",
+ },
+ },
+ "Machine Information": {
+ "host": {
+ "cpu": "x86_64",
+ "family": "x86_64",
+ "endian": "little",
+ "system": "linux",
+ },
+ "build": {
+ "cpu": "x86_64",
+ "family": "x86_64",
+ "endian": "little",
+ "system": "linux",
+ },
+ "cross-compiled": bool("False".lower().replace("false", "")),
+ },
+ "Build Dependencies": {
+ "blas": {
+ "name": "scipy-openblas",
+ "found": bool("True".lower().replace("false", "")),
+ "version": "0.3.30",
+ "detection method": "pkgconfig",
+ "include directory": r"/opt/_internal/cpython-3.12.11/lib/python3.12/site-packages/scipy_openblas64/include",
+ "lib directory": r"/opt/_internal/cpython-3.12.11/lib/python3.12/site-packages/scipy_openblas64/lib",
+ "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64",
+ "pc file directory": r"/project/.openblas",
+ },
+ "lapack": {
+ "name": "scipy-openblas",
+ "found": bool("True".lower().replace("false", "")),
+ "version": "0.3.30",
+ "detection method": "pkgconfig",
+ "include directory": r"/opt/_internal/cpython-3.12.11/lib/python3.12/site-packages/scipy_openblas64/include",
+ "lib directory": r"/opt/_internal/cpython-3.12.11/lib/python3.12/site-packages/scipy_openblas64/lib",
+ "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64",
+ "pc file directory": r"/project/.openblas",
+ },
+ },
+ "Python Information": {
+ "path": r"/tmp/build-env-w4xgk5bb/bin/python",
+ "version": "3.12",
+ },
+ "SIMD Extensions": {
+ "baseline": __cpu_baseline__,
+ "found": [
+ feature for feature in __cpu_dispatch__ if __cpu_features__[feature]
+ ],
+ "not found": [
+ feature for feature in __cpu_dispatch__ if not __cpu_features__[feature]
+ ],
+ },
+ }
+)
+
+
+def _check_pyyaml():
+ import yaml
+
+ return yaml
+
+
+def show(mode=DisplayModes.stdout.value):
+ """
+    Show the libraries and system information NumPy was built against
+    and is currently running on.
+
+ Parameters
+ ----------
+    mode : {`'stdout'`, `'dicts'`}, optional
+ Indicates how to display the config information.
+ `'stdout'` prints to console, `'dicts'` returns a dictionary
+ of the configuration.
+
+ Returns
+ -------
+ out : {`dict`, `None`}
+ If mode is `'dicts'`, a dict is returned, else None
+
+ See Also
+ --------
+ get_include : Returns the directory containing NumPy C
+ header files.
+
+ Notes
+ -----
+ 1. The `'stdout'` mode will give more readable
+       output if ``pyyaml`` is installed.
+
+ """
+ if mode == DisplayModes.stdout.value:
+ try: # Non-standard library, check import
+ yaml = _check_pyyaml()
+
+ print(yaml.dump(CONFIG))
+ except ModuleNotFoundError:
+ import warnings
+ import json
+
+ warnings.warn("Install `pyyaml` for better output", stacklevel=1)
+ print(json.dumps(CONFIG, indent=2))
+ elif mode == DisplayModes.dicts.value:
+ return CONFIG
+ else:
+ raise AttributeError(
+ f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}"
+ )
+
+
+def show_config(mode=DisplayModes.stdout.value):
+ return show(mode)
+
+
+show_config.__doc__ = show.__doc__
+show_config.__module__ = "numpy"
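[editor's note, not part of the patch] In practice this module is reached
through numpy.show_config; mode="dicts" returns the CONFIG mapping instead of
printing, and the stdout path falls back to json (with a warning) when pyyaml
is missing. For example:

    import numpy as np

    cfg = np.show_config(mode="dicts")
    print(cfg["Build Dependencies"]["blas"]["name"])   # "scipy-openblas"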
diff --git a/.venv/lib/python3.12/site-packages/numpy/__config__.pyi b/.venv/lib/python3.12/site-packages/numpy/__config__.pyi
new file mode 100644
index 00000000..b59bdcd2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/__config__.pyi
@@ -0,0 +1,102 @@
+from enum import Enum
+from types import ModuleType
+from typing import Final, NotRequired, TypedDict, overload, type_check_only
+from typing import Literal as L
+
+_CompilerConfigDictValue = TypedDict(
+ "_CompilerConfigDictValue",
+ {
+ "name": str,
+ "linker": str,
+ "version": str,
+ "commands": str,
+ "args": str,
+ "linker args": str,
+ },
+)
+_CompilerConfigDict = TypedDict(
+ "_CompilerConfigDict",
+ {
+ "c": _CompilerConfigDictValue,
+ "cython": _CompilerConfigDictValue,
+ "c++": _CompilerConfigDictValue,
+ },
+)
+_MachineInformationDict = TypedDict(
+ "_MachineInformationDict",
+ {
+ "host": _MachineInformationDictValue,
+ "build": _MachineInformationDictValue,
+ "cross-compiled": NotRequired[L[True]],
+ },
+)
+
+@type_check_only
+class _MachineInformationDictValue(TypedDict):
+ cpu: str
+ family: str
+ endian: L["little", "big"]
+ system: str
+
+_BuildDependenciesDictValue = TypedDict(
+ "_BuildDependenciesDictValue",
+ {
+ "name": str,
+ "found": NotRequired[L[True]],
+ "version": str,
+ "include directory": str,
+ "lib directory": str,
+ "openblas configuration": str,
+ "pc file directory": str,
+ },
+)
+
+class _BuildDependenciesDict(TypedDict):
+ blas: _BuildDependenciesDictValue
+ lapack: _BuildDependenciesDictValue
+
+class _PythonInformationDict(TypedDict):
+ path: str
+ version: str
+
+_SIMDExtensionsDict = TypedDict(
+ "_SIMDExtensionsDict",
+ {
+ "baseline": list[str],
+ "found": list[str],
+ "not found": list[str],
+ },
+)
+
+_ConfigDict = TypedDict(
+ "_ConfigDict",
+ {
+ "Compilers": _CompilerConfigDict,
+ "Machine Information": _MachineInformationDict,
+ "Build Dependencies": _BuildDependenciesDict,
+ "Python Information": _PythonInformationDict,
+ "SIMD Extensions": _SIMDExtensionsDict,
+ },
+)
+
+###
+
+__all__ = ["show_config"]
+
+CONFIG: Final[_ConfigDict] = ...
+
+class DisplayModes(Enum):
+ stdout = "stdout"
+ dicts = "dicts"
+
+def _check_pyyaml() -> ModuleType: ...
+
+@overload
+def show(mode: L["stdout"] = "stdout") -> None: ...
+@overload
+def show(mode: L["dicts"]) -> _ConfigDict: ...
+
+@overload
+def show_config(mode: L["stdout"] = "stdout") -> None: ...
+@overload
+def show_config(mode: L["dicts"]) -> _ConfigDict: ...
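[editor's note, not part of the patch] The paired @overload signatures key the
return type off the literal value of mode, so under these stubs a type checker
resolves, e.g.:

    import numpy as np

    reveal_type(np.show_config())              # None
    reveal_type(np.show_config(mode="dicts"))  # _ConfigDict (a TypedDict)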
diff --git a/.venv/lib/python3.12/site-packages/numpy/__init__.cython-30.pxd b/.venv/lib/python3.12/site-packages/numpy/__init__.cython-30.pxd
new file mode 100644
index 00000000..86c91cf6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/__init__.cython-30.pxd
@@ -0,0 +1,1241 @@
+# NumPy static imports for Cython >= 3.0
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first. This is done automatically by Cython 3.0+ if a call
+# is not detected inside of the module.
+#
+# Author: Dag Sverre Seljebotn
+#
+
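+# Editor's illustration (not part of the upstream header): a .pyx module that
+# calls any PyArray_* function must make sure import_array() has run, e.g.
+#
+#     cimport numpy as cnp
+#     cnp.import_array()   # Cython 3.0+ normally inserts this call itself
+#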
+from cpython.ref cimport Py_INCREF
+from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck
+cimport libc.stdio as stdio
+
+
+cdef extern from *:
+ # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython.
+ # See https://github.com/cython/cython/issues/3573
+ """
+ /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */
+ """
+
+
+cdef extern from "numpy/arrayobject.h":
+ # It would be nice to use size_t and ssize_t, but ssize_t has special
+ # implicit conversion rules, so just use "long".
+ # Note: The actual type only matters for Cython promotion, so long
+ # is closer than int, but could lead to incorrect promotion.
+    # (Not too worrying, and always the status quo.)
+ ctypedef signed long npy_intp
+ ctypedef unsigned long npy_uintp
+
+ ctypedef unsigned char npy_bool
+
+ ctypedef signed char npy_byte
+ ctypedef signed short npy_short
+ ctypedef signed int npy_int
+ ctypedef signed long npy_long
+ ctypedef signed long long npy_longlong
+
+ ctypedef unsigned char npy_ubyte
+ ctypedef unsigned short npy_ushort
+ ctypedef unsigned int npy_uint
+ ctypedef unsigned long npy_ulong
+ ctypedef unsigned long long npy_ulonglong
+
+ ctypedef float npy_float
+ ctypedef double npy_double
+ ctypedef long double npy_longdouble
+
+ ctypedef signed char npy_int8
+ ctypedef signed short npy_int16
+ ctypedef signed int npy_int32
+ ctypedef signed long long npy_int64
+
+ ctypedef unsigned char npy_uint8
+ ctypedef unsigned short npy_uint16
+ ctypedef unsigned int npy_uint32
+ ctypedef unsigned long long npy_uint64
+
+ ctypedef float npy_float32
+ ctypedef double npy_float64
+ ctypedef long double npy_float80
+ ctypedef long double npy_float96
+ ctypedef long double npy_float128
+
+ ctypedef struct npy_cfloat:
+ pass
+
+ ctypedef struct npy_cdouble:
+ pass
+
+ ctypedef struct npy_clongdouble:
+ pass
+
+ ctypedef struct npy_complex64:
+ pass
+
+ ctypedef struct npy_complex128:
+ pass
+
+ ctypedef struct npy_complex160:
+ pass
+
+ ctypedef struct npy_complex192:
+ pass
+
+ ctypedef struct npy_complex256:
+ pass
+
+ ctypedef struct PyArray_Dims:
+ npy_intp *ptr
+ int len
+
+
+ cdef enum NPY_TYPES:
+ NPY_BOOL
+ NPY_BYTE
+ NPY_UBYTE
+ NPY_SHORT
+ NPY_USHORT
+ NPY_INT
+ NPY_UINT
+ NPY_LONG
+ NPY_ULONG
+ NPY_LONGLONG
+ NPY_ULONGLONG
+ NPY_FLOAT
+ NPY_DOUBLE
+ NPY_LONGDOUBLE
+ NPY_CFLOAT
+ NPY_CDOUBLE
+ NPY_CLONGDOUBLE
+ NPY_OBJECT
+ NPY_STRING
+ NPY_UNICODE
+ NPY_VSTRING
+ NPY_VOID
+ NPY_DATETIME
+ NPY_TIMEDELTA
+ NPY_NTYPES_LEGACY
+ NPY_NOTYPE
+
+ NPY_INT8
+ NPY_INT16
+ NPY_INT32
+ NPY_INT64
+ NPY_UINT8
+ NPY_UINT16
+ NPY_UINT32
+ NPY_UINT64
+ NPY_FLOAT16
+ NPY_FLOAT32
+ NPY_FLOAT64
+ NPY_FLOAT80
+ NPY_FLOAT96
+ NPY_FLOAT128
+ NPY_COMPLEX64
+ NPY_COMPLEX128
+ NPY_COMPLEX160
+ NPY_COMPLEX192
+ NPY_COMPLEX256
+
+ NPY_INTP
+ NPY_UINTP
+ NPY_DEFAULT_INT # Not a compile time constant (normally)!
+
+ ctypedef enum NPY_ORDER:
+ NPY_ANYORDER
+ NPY_CORDER
+ NPY_FORTRANORDER
+ NPY_KEEPORDER
+
+ ctypedef enum NPY_CASTING:
+ NPY_NO_CASTING
+ NPY_EQUIV_CASTING
+ NPY_SAFE_CASTING
+ NPY_SAME_KIND_CASTING
+ NPY_UNSAFE_CASTING
+
+ ctypedef enum NPY_CLIPMODE:
+ NPY_CLIP
+ NPY_WRAP
+ NPY_RAISE
+
+ ctypedef enum NPY_SCALARKIND:
+ NPY_NOSCALAR,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+
+ ctypedef enum NPY_SORTKIND:
+ NPY_QUICKSORT
+ NPY_HEAPSORT
+ NPY_MERGESORT
+
+ ctypedef enum NPY_SEARCHSIDE:
+ NPY_SEARCHLEFT
+ NPY_SEARCHRIGHT
+
+ enum:
+ NPY_ARRAY_C_CONTIGUOUS
+ NPY_ARRAY_F_CONTIGUOUS
+ NPY_ARRAY_OWNDATA
+ NPY_ARRAY_FORCECAST
+ NPY_ARRAY_ENSURECOPY
+ NPY_ARRAY_ENSUREARRAY
+ NPY_ARRAY_ELEMENTSTRIDES
+ NPY_ARRAY_ALIGNED
+ NPY_ARRAY_NOTSWAPPED
+ NPY_ARRAY_WRITEABLE
+ NPY_ARRAY_WRITEBACKIFCOPY
+
+ NPY_ARRAY_BEHAVED
+ NPY_ARRAY_BEHAVED_NS
+ NPY_ARRAY_CARRAY
+ NPY_ARRAY_CARRAY_RO
+ NPY_ARRAY_FARRAY
+ NPY_ARRAY_FARRAY_RO
+ NPY_ARRAY_DEFAULT
+
+ NPY_ARRAY_IN_ARRAY
+ NPY_ARRAY_OUT_ARRAY
+ NPY_ARRAY_INOUT_ARRAY
+ NPY_ARRAY_IN_FARRAY
+ NPY_ARRAY_OUT_FARRAY
+ NPY_ARRAY_INOUT_FARRAY
+
+ NPY_ARRAY_UPDATE_ALL
+
+ cdef enum:
+ NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x
+ NPY_RAVEL_AXIS # Used for functions like PyArray_Mean
+
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
+
+ ctypedef struct PyArray_ArrayDescr:
+ # shape is a tuple, but Cython doesn't support "tuple shape"
+ # inside a non-PyObject declaration, so we have to declare it
+ # as just a PyObject*.
+ PyObject* shape
+
+ ctypedef struct PyArray_Descr:
+ pass
+
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
+ # Use PyDataType_* macros when possible, however there are no macros
+ # for accessing some of the fields, so some are defined.
+ cdef PyTypeObject* typeobj
+ cdef char kind
+ cdef char type
+ # Numpy sometimes mutates this without warning (e.g. it'll
+ # sometimes change "|" to "<" in shared dtype objects on
+ # little-endian machines). If this matters to you, use
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
+ # directly accessing this field.
+ cdef char byteorder
+ cdef int type_num
+
+ @property
+ cdef inline npy_intp itemsize(self) noexcept nogil:
+ return PyDataType_ELSIZE(self)
+
+ @property
+ cdef inline npy_intp alignment(self) noexcept nogil:
+ return PyDataType_ALIGNMENT(self)
+
+ # Use fields/names with care as they may be NULL. You must check
+ # for this using PyDataType_HASFIELDS.
+ @property
+ cdef inline object fields(self):
+ return PyDataType_FIELDS(self)
+
+ @property
+ cdef inline tuple names(self):
+ return PyDataType_NAMES(self)
+
+ # Use PyDataType_HASSUBARRAY to test whether this field is
+ # valid (the pointer can be NULL). Most users should access
+ # this field via the inline helper method PyDataType_SHAPE.
+ @property
+ cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil:
+ return PyDataType_SUBARRAY(self)
+
+ @property
+ cdef inline npy_uint64 flags(self) noexcept nogil:
+            """The data type's flags."""
+ return PyDataType_FLAGS(self)
+
+
+ ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
+ # Use through macros
+ pass
+
+ ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
+
+ @property
+ cdef inline int numiter(self) noexcept nogil:
+ """The number of arrays that need to be broadcast to the same shape."""
+ return PyArray_MultiIter_NUMITER(self)
+
+ @property
+ cdef inline npy_intp size(self) noexcept nogil:
+ """The total broadcasted size."""
+ return PyArray_MultiIter_SIZE(self)
+
+ @property
+ cdef inline npy_intp index(self) noexcept nogil:
+ """The current (1-d) index into the broadcasted result."""
+ return PyArray_MultiIter_INDEX(self)
+
+ @property
+ cdef inline int nd(self) noexcept nogil:
+ """The number of dimensions in the broadcasted result."""
+ return PyArray_MultiIter_NDIM(self)
+
+ @property
+ cdef inline npy_intp* dimensions(self) noexcept nogil:
+ """The shape of the broadcasted result."""
+ return PyArray_MultiIter_DIMS(self)
+
+ @property
+ cdef inline void** iters(self) noexcept nogil:
+ """An array of iterator objects that holds the iterators for the arrays to be broadcast together.
+ On return, the iterators are adjusted for broadcasting."""
+ return PyArray_MultiIter_ITERS(self)
+
+
+ ctypedef struct PyArrayObject:
+ # For use in situations where ndarray can't replace PyArrayObject*,
+ # like PyArrayObject**.
+ pass
+
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
+
+    # NOTE: no field declarations; direct field access has been deprecated since NumPy 1.7.
+ # Instead, we use properties that map to the corresponding C-API functions.
+
+ @property
+ cdef inline PyObject* base(self) noexcept nogil:
+ """Returns a borrowed reference to the object owning the data/memory.
+ """
+ return PyArray_BASE(self)
+
+ @property
+ cdef inline dtype descr(self):
+ """Returns an owned reference to the dtype of the array.
+ """
+        return <dtype>PyArray_DESCR(self)
+
+ @property
+ cdef inline int ndim(self) noexcept nogil:
+ """Returns the number of dimensions in the array.
+ """
+ return PyArray_NDIM(self)
+
+ @property
+ cdef inline npy_intp *shape(self) noexcept nogil:
+ """Returns a pointer to the dimensions/shape of the array.
+ The number of elements matches the number of dimensions of the array (ndim).
+ Can return NULL for 0-dimensional arrays.
+ """
+ return PyArray_DIMS(self)
+
+ @property
+ cdef inline npy_intp *strides(self) noexcept nogil:
+ """Returns a pointer to the strides of the array.
+ The number of elements matches the number of dimensions of the array (ndim).
+ """
+ return PyArray_STRIDES(self)
+
+ @property
+ cdef inline npy_intp size(self) noexcept nogil:
+ """Returns the total size (in number of elements) of the array.
+ """
+ return PyArray_SIZE(self)
+
+ @property
+ cdef inline char* data(self) noexcept nogil:
+ """The pointer to the data buffer as a char*.
+ This is provided for legacy reasons to avoid direct struct field access.
+ For new code that needs this access, you probably want to cast the result
+ of `PyArray_DATA()` instead, which returns a 'void*'.
+ """
+ return PyArray_BYTES(self)
+
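+    # Illustrative note (not part of the declarations): the `data` property
+    # above returns char*; for typed access, new code should instead cast
+    # the macro result, e.g. in a .pyx file (variable names hypothetical):
+    #
+    #     cdef double* p = <double*>PyArray_DATA(arr)
+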
+
+ int _import_array() except -1
+ # A second definition so _import_array isn't marked as used when we use it here.
+ # Do not use - subject to change any time.
+ int __pyx_import_array "_import_array"() except -1
+
+ #
+ # Macros from ndarrayobject.h
+ #
+ bint PyArray_CHKFLAGS(ndarray m, int flags) nogil
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_ISCONTIGUOUS(ndarray m) nogil
+ bint PyArray_ISWRITEABLE(ndarray m) nogil
+ bint PyArray_ISALIGNED(ndarray m) nogil
+
+ int PyArray_NDIM(ndarray) nogil
+ bint PyArray_ISONESEGMENT(ndarray) nogil
+ bint PyArray_ISFORTRAN(ndarray) nogil
+ int PyArray_FORTRANIF(ndarray) nogil
+
+ void* PyArray_DATA(ndarray) nogil
+ char* PyArray_BYTES(ndarray) nogil
+
+ npy_intp* PyArray_DIMS(ndarray) nogil
+ npy_intp* PyArray_STRIDES(ndarray) nogil
+ npy_intp PyArray_DIM(ndarray, size_t) nogil
+ npy_intp PyArray_STRIDE(ndarray, size_t) nogil
+
+ PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference!
+ PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype!
+ PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr.
+ int PyArray_FLAGS(ndarray) nogil
+ void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
+ void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
+ npy_intp PyArray_ITEMSIZE(ndarray) nogil
+ int PyArray_TYPE(ndarray arr) nogil
+
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1
+
+ bint PyTypeNum_ISBOOL(int) nogil
+ bint PyTypeNum_ISUNSIGNED(int) nogil
+ bint PyTypeNum_ISSIGNED(int) nogil
+ bint PyTypeNum_ISINTEGER(int) nogil
+ bint PyTypeNum_ISFLOAT(int) nogil
+ bint PyTypeNum_ISNUMBER(int) nogil
+ bint PyTypeNum_ISSTRING(int) nogil
+ bint PyTypeNum_ISCOMPLEX(int) nogil
+ bint PyTypeNum_ISFLEXIBLE(int) nogil
+ bint PyTypeNum_ISUSERDEF(int) nogil
+ bint PyTypeNum_ISEXTENDED(int) nogil
+ bint PyTypeNum_ISOBJECT(int) nogil
+
+ npy_intp PyDataType_ELSIZE(dtype) nogil
+ npy_intp PyDataType_ALIGNMENT(dtype) nogil
+ PyObject* PyDataType_METADATA(dtype) nogil
+ PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil
+ PyObject* PyDataType_NAMES(dtype) nogil
+ PyObject* PyDataType_FIELDS(dtype) nogil
+
+ bint PyDataType_ISBOOL(dtype) nogil
+ bint PyDataType_ISUNSIGNED(dtype) nogil
+ bint PyDataType_ISSIGNED(dtype) nogil
+ bint PyDataType_ISINTEGER(dtype) nogil
+ bint PyDataType_ISFLOAT(dtype) nogil
+ bint PyDataType_ISNUMBER(dtype) nogil
+ bint PyDataType_ISSTRING(dtype) nogil
+ bint PyDataType_ISCOMPLEX(dtype) nogil
+ bint PyDataType_ISFLEXIBLE(dtype) nogil
+ bint PyDataType_ISUSERDEF(dtype) nogil
+ bint PyDataType_ISEXTENDED(dtype) nogil
+ bint PyDataType_ISOBJECT(dtype) nogil
+ bint PyDataType_HASFIELDS(dtype) nogil
+ bint PyDataType_HASSUBARRAY(dtype) nogil
+ npy_uint64 PyDataType_FLAGS(dtype) nogil
+
+ bint PyArray_ISBOOL(ndarray) nogil
+ bint PyArray_ISUNSIGNED(ndarray) nogil
+ bint PyArray_ISSIGNED(ndarray) nogil
+ bint PyArray_ISINTEGER(ndarray) nogil
+ bint PyArray_ISFLOAT(ndarray) nogil
+ bint PyArray_ISNUMBER(ndarray) nogil
+ bint PyArray_ISSTRING(ndarray) nogil
+ bint PyArray_ISCOMPLEX(ndarray) nogil
+ bint PyArray_ISFLEXIBLE(ndarray) nogil
+ bint PyArray_ISUSERDEF(ndarray) nogil
+ bint PyArray_ISEXTENDED(ndarray) nogil
+ bint PyArray_ISOBJECT(ndarray) nogil
+ bint PyArray_HASFIELDS(ndarray) nogil
+
+ bint PyArray_ISVARIABLE(ndarray) nogil
+
+ bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil
+ bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder
+ bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder
+ bint PyArray_ISNOTSWAPPED(ndarray) nogil
+ bint PyArray_ISBYTESWAPPED(ndarray) nogil
+
+ bint PyArray_FLAGSWAP(ndarray, int) nogil
+
+ bint PyArray_ISCARRAY(ndarray) nogil
+ bint PyArray_ISCARRAY_RO(ndarray) nogil
+ bint PyArray_ISFARRAY(ndarray) nogil
+ bint PyArray_ISFARRAY_RO(ndarray) nogil
+ bint PyArray_ISBEHAVED(ndarray) nogil
+ bint PyArray_ISBEHAVED_RO(ndarray) nogil
+
+
+ bint PyDataType_ISNOTSWAPPED(dtype) nogil
+ bint PyDataType_ISBYTESWAPPED(dtype) nogil
+
+ bint PyArray_DescrCheck(object)
+
+ bint PyArray_Check(object)
+ bint PyArray_CheckExact(object)
+
+ # Cannot be supported due to out arg:
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
+ # bint PyArray_HasArrayInterface(op, out)
+
+
+ bint PyArray_IsZeroDim(object)
+ # Cannot be supported due to ## ## in macro:
+ # bint PyArray_IsScalar(object, verbatim work)
+ bint PyArray_CheckScalar(object)
+ bint PyArray_IsPythonNumber(object)
+ bint PyArray_IsPythonScalar(object)
+ bint PyArray_IsAnyScalar(object)
+ bint PyArray_CheckAnyScalar(object)
+
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
+ bint PyArray_SAMESHAPE(ndarray, ndarray) nogil
+ npy_intp PyArray_SIZE(ndarray) nogil
+ npy_intp PyArray_NBYTES(ndarray) nogil
+
+ object PyArray_FROM_O(object)
+ object PyArray_FROM_OF(object m, int flags)
+ object PyArray_FROM_OT(object m, int type)
+ object PyArray_FROM_OTF(object m, int type, int flags)
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
+ void PyArray_FILLWBYTE(ndarray, int val)
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
+ bint PyArray_EquivByteorders(int b1, int b2) nogil
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
+ object PyArray_ToScalar(void* data, ndarray arr)
+
+ void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
+
+ # Cannot be supported due to out arg
+ # void PyArray_DESCR_REPLACE(descr)
+
+
+ object PyArray_Copy(ndarray)
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
+
+ object PyArray_Cast(ndarray mp, int type_num)
+ object PyArray_Take(ndarray ap, object items, int axis)
+ object PyArray_Put(ndarray ap, object items, object values)
+
+ void PyArray_ITER_RESET(flatiter it) nogil
+ void PyArray_ITER_NEXT(flatiter it) nogil
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
+
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
+ npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil
+ int PyArray_MultiIter_NDIM(broadcast multi) nogil
+ npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil
+ int PyArray_MultiIter_NUMITER(broadcast multi) nogil
+ npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil
+ void** PyArray_MultiIter_ITERS(broadcast multi) nogil
+
+ # Functions from __multiarray_api.h
+
+ # Functions taking dtype and returning object/ndarray are disabled
+ # for now as they steal dtype references. I'm conservative and disable
+ # more than is probably needed until it can be checked further.
+ int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF...
+ int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF...
+ dtype PyArray_DescrFromType (int)
+ object PyArray_TypeObjectFromType (int)
+ char * PyArray_Zero (ndarray)
+ char * PyArray_One (ndarray)
+ #object PyArray_CastToType (ndarray, dtype, int)
+ int PyArray_CanCastSafely (int, int) # writes errors
+ npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors
+ int PyArray_ObjectType (object, int) except 0
+ dtype PyArray_DescrFromObject (object, dtype)
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
+ dtype PyArray_DescrFromScalar (object)
+ dtype PyArray_DescrFromTypeObject (object)
+ npy_intp PyArray_Size (object)
+ #object PyArray_Scalar (void *, dtype, object)
+ #object PyArray_FromScalar (object, dtype)
+ void PyArray_ScalarAsCtype (object, void *)
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
+ object PyArray_EnsureArray (object)
+ object PyArray_EnsureAnyArray (object)
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
+ #object PyArray_FromIter (object, dtype, npy_intp)
+ object PyArray_Return (ndarray)
+ #object PyArray_GetField (ndarray, dtype, int)
+ #int PyArray_SetField (ndarray, dtype, int, object) except -1
+ object PyArray_Byteswap (ndarray, npy_bool)
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
+ int PyArray_CopyInto (ndarray, ndarray) except -1
+ int PyArray_CopyAnyInto (ndarray, ndarray) except -1
+ int PyArray_CopyObject (ndarray, object) except -1
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
+ object PyArray_ToList (ndarray)
+ object PyArray_ToString (ndarray, NPY_ORDER)
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1
+ int PyArray_Dump (object, object, int) except -1
+ object PyArray_Dumps (object, int)
+ int PyArray_ValidType (int) # Cannot error
+ void PyArray_UpdateFlags (ndarray, int)
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
+ #dtype PyArray_DescrNew (dtype)
+ dtype PyArray_DescrNewFromType (int)
+ double PyArray_GetPriority (object, double) # clears errors as of 1.25
+ object PyArray_IterNew (object)
+ object PyArray_MultiIterNew (int, ...)
+
+ int PyArray_PyIntAsInt (object) except? -1
+ npy_intp PyArray_PyIntAsIntp (object)
+ int PyArray_Broadcast (broadcast) except -1
+ int PyArray_FillWithScalar (ndarray, object) except -1
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
+ dtype PyArray_DescrNewByteorder (dtype, char)
+ object PyArray_IterAllButAxis (object, int *)
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
+ #object PyArray_FromArray (ndarray, dtype, int)
+ object PyArray_FromInterface (object)
+ object PyArray_FromStructInterface (object)
+ #object PyArray_FromArrayAttr (object, dtype, object)
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
+ npy_bool PyArray_CanCastScalar (type, type)
+ int PyArray_RemoveSmallest (broadcast) except -1
+ int PyArray_ElementStrides (object)
+ void PyArray_Item_INCREF (char *, dtype) except *
+ void PyArray_Item_XDECREF (char *, dtype) except *
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
+ object PyArray_PutMask (ndarray, object, object)
+ object PyArray_Repeat (ndarray, object, int)
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *)
+ object PyArray_ArgMax (ndarray, int, ndarray)
+ object PyArray_ArgMin (ndarray, int, ndarray)
+ object PyArray_Reshape (ndarray, object)
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
+ object PyArray_Squeeze (ndarray)
+ #object PyArray_View (ndarray, dtype, type)
+ object PyArray_SwapAxes (ndarray, int, int)
+ object PyArray_Max (ndarray, int, ndarray)
+ object PyArray_Min (ndarray, int, ndarray)
+ object PyArray_Ptp (ndarray, int, ndarray)
+ object PyArray_Mean (ndarray, int, int, ndarray)
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
+ object PyArray_Diagonal (ndarray, int, int, int)
+ object PyArray_Clip (ndarray, object, object, ndarray)
+ object PyArray_Conjugate (ndarray, ndarray)
+ object PyArray_Nonzero (ndarray)
+ object PyArray_Std (ndarray, int, int, ndarray, int)
+ object PyArray_Sum (ndarray, int, int, ndarray)
+ object PyArray_CumSum (ndarray, int, int, ndarray)
+ object PyArray_Prod (ndarray, int, int, ndarray)
+ object PyArray_CumProd (ndarray, int, int, ndarray)
+ object PyArray_All (ndarray, int, ndarray)
+ object PyArray_Any (ndarray, int, ndarray)
+ object PyArray_Compress (ndarray, object, int, ndarray)
+ object PyArray_Flatten (ndarray, NPY_ORDER)
+ object PyArray_Ravel (ndarray, NPY_ORDER)
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
+ int PyArray_MultiplyIntList (int *, int)
+ void * PyArray_GetPtr (ndarray, npy_intp*)
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
+ int PyArray_Free (object, void *)
+ #int PyArray_Converter (object, object*)
+ int PyArray_IntpFromSequence (object, npy_intp *, int) except -1
+ object PyArray_Concatenate (object, int)
+ object PyArray_InnerProduct (object, object)
+ object PyArray_MatrixProduct (object, object)
+ object PyArray_Correlate (object, object, int)
+ #int PyArray_DescrConverter (object, dtype*) except 0
+ #int PyArray_DescrConverter2 (object, dtype*) except 0
+ int PyArray_IntpConverter (object, PyArray_Dims *) except 0
+ #int PyArray_BufferConverter (object, chunk) except 0
+ int PyArray_AxisConverter (object, int *) except 0
+ int PyArray_BoolConverter (object, npy_bool *) except 0
+ int PyArray_ByteorderConverter (object, char *) except 0
+ int PyArray_OrderConverter (object, NPY_ORDER *) except 0
+ unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
+ object PyArray_Where (object, object, object)
+ object PyArray_Arange (double, double, double, int)
+ #object PyArray_ArangeObj (object, object, object, dtype)
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0
+ object PyArray_LexSort (object, int)
+ object PyArray_Round (ndarray, int, ndarray)
+ unsigned char PyArray_EquivTypenums (int, int)
+ int PyArray_RegisterDataType (dtype) except -1
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0
+ #int PyArray_OutputConverter (object, ndarray*) except 0
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
+ #int PyArray_DescrAlignConverter (object, dtype*) except 0
+ #int PyArray_DescrAlignConverter2 (object, dtype*) except 0
+ int PyArray_SearchsideConverter (object, void *) except 0
+ object PyArray_CheckAxis (ndarray, int *, int)
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+ int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+ # The memory handler functions require the NumPy 1.22 API
+ # and may require defining NPY_TARGET_VERSION
+ ctypedef struct PyDataMemAllocator:
+ void *ctx
+ void* (*malloc) (void *ctx, size_t size)
+ void* (*calloc) (void *ctx, size_t nelem, size_t elsize)
+ void* (*realloc) (void *ctx, void *ptr, size_t new_size)
+ void (*free) (void *ctx, void *ptr, size_t size)
+
+ ctypedef struct PyDataMem_Handler:
+ char* name
+ npy_uint8 version
+ PyDataMemAllocator allocator
+
+ object PyDataMem_SetHandler(object handler)
+ object PyDataMem_GetHandler()
+
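+    # Illustrative pattern (comment only): PyDataMem_SetHandler expects a
+    # PyCapsule named "mem_handler" wrapping a PyDataMem_Handler, and it
+    # returns the previously installed handler so it can be restored later.
+    # `capsule` below is hypothetical:
+    #
+    #     old = PyDataMem_SetHandler(capsule)  # install custom allocator
+    #     ...                                  # allocate some arrays
+    #     PyDataMem_SetHandler(old)            # restore the previous handler
+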
+ # additional datetime related functions are defined below
+
+
+# Typedefs that match the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out need an IFDEF mechanism
+# in Cython to enable them only on the right systems.
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+ctypedef npy_longlong longlong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef float complex cfloat_t
+ctypedef double complex cdouble_t
+ctypedef double complex complex_t
+ctypedef long double complex clongdouble_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+ return PyArray_MultiIterNew(1, a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+ return PyArray_MultiIterNew(2, a, b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+ return PyArray_MultiIterNew(3, a, b, c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ return PyArray_MultiIterNew(4, a, b, c, d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ return PyArray_MultiIterNew(5, a, b, c, d, e)
+
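+# Illustrative sketch (hypothetical helper, not part of the NumPy API): the
+# helpers above feed the multi-iterator, which then walks its operands in
+# lockstep after broadcasting them to a common shape. Assumes both operands
+# are float64 arrays.
+cdef inline double broadcast_pair_sum(object a, object b) except? -1.0:
+    cdef broadcast it = PyArray_MultiIterNew2(a, b)
+    cdef double total = 0.0
+    while PyArray_MultiIter_NOTDONE(it):
+        # Element pointers for operand 0 and operand 1 at the current index.
+        total += ((<double*>PyArray_MultiIter_DATA(it, 0))[0]
+                  + (<double*>PyArray_MultiIter_DATA(it, 1))[0])
+        PyArray_MultiIter_NEXT(it)
+    return total
+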
+cdef inline tuple PyDataType_SHAPE(dtype d):
+    if PyDataType_HASSUBARRAY(d):
+        return <tuple>d.subarray.shape
+    else:
+        return ()
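+
+# Illustrative guard (hypothetical helper) for the caveat noted on the
+# `fields`/`names` properties above: only touch them once
+# PyDataType_HASFIELDS confirms the dtype is structured.
+cdef inline tuple dtype_field_names(dtype d):
+    if PyDataType_HASFIELDS(d):
+        return d.names
+    return ()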
+
+
+cdef extern from "numpy/ndarrayobject.h":
+ PyTypeObject PyTimedeltaArrType_Type
+ PyTypeObject PyDatetimeArrType_Type
+ ctypedef int64_t npy_timedelta
+ ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+ ctypedef struct PyArray_DatetimeMetaData:
+ NPY_DATETIMEUNIT base
+ int64_t num
+
+ ctypedef struct npy_datetimestruct:
+ int64_t year
+ int32_t month, day, hour, min, sec, us, ps, as
+
+ # Iterator API added in v1.6
+ #
+ # These don't match the definition in the C API because Cython can't wrap
+ # function pointers that return functions.
+ # https://github.com/cython/cython/issues/6720
+ ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil
+ ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil
+
+
+cdef extern from "numpy/arrayscalars.h":
+
+ # abstract types
+ ctypedef class numpy.generic [object PyObject]:
+ pass
+ ctypedef class numpy.number [object PyObject]:
+ pass
+ ctypedef class numpy.integer [object PyObject]:
+ pass
+ ctypedef class numpy.signedinteger [object PyObject]:
+ pass
+ ctypedef class numpy.unsignedinteger [object PyObject]:
+ pass
+ ctypedef class numpy.inexact [object PyObject]:
+ pass
+ ctypedef class numpy.floating [object PyObject]:
+ pass
+ ctypedef class numpy.complexfloating [object PyObject]:
+ pass
+ ctypedef class numpy.flexible [object PyObject]:
+ pass
+ ctypedef class numpy.character [object PyObject]:
+ pass
+
+ ctypedef struct PyDatetimeScalarObject:
+ # PyObject_HEAD
+ npy_datetime obval
+ PyArray_DatetimeMetaData obmeta
+
+ ctypedef struct PyTimedeltaScalarObject:
+ # PyObject_HEAD
+ npy_timedelta obval
+ PyArray_DatetimeMetaData obmeta
+
+ ctypedef enum NPY_DATETIMEUNIT:
+ NPY_FR_Y
+ NPY_FR_M
+ NPY_FR_W
+ NPY_FR_D
+ NPY_FR_B
+ NPY_FR_h
+ NPY_FR_m
+ NPY_FR_s
+ NPY_FR_ms
+ NPY_FR_us
+ NPY_FR_ns
+ NPY_FR_ps
+ NPY_FR_fs
+ NPY_FR_as
+ NPY_FR_GENERIC
+
+
+cdef extern from "numpy/arrayobject.h":
+ # These are part of the C-API defined in `__multiarray_api.h`
+
+ # NumPy internal definitions in datetime_strings.c:
+ int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" (
+ int local, NPY_DATETIMEUNIT base)
+ int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" (
+ npy_datetimestruct *dts, char *outstr, npy_intp outlen,
+ int local, int utc, NPY_DATETIMEUNIT base, int tzoffset,
+ NPY_CASTING casting) except -1
+
+ # NumPy internal definition in datetime.c:
+    # May return 1 to indicate that the object does not appear to be a
+    # datetime (returns 0 on success).
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" (
+ PyObject *obj, npy_datetimestruct *out,
+ NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1
+ int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" (
+ PyArray_DatetimeMetaData *meta, npy_datetime dt,
+ npy_datetimestruct *out) except -1
+ int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"(
+ PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts,
+ npy_datetime *out) except -1
+
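+# Illustrative wrapper (hypothetical name) over the internal hook above:
+# convert a Python datetime to an npy_datetimestruct, letting NumPy pick the
+# best unit. apply_tzinfo=1 converts timezone-aware datetimes to UTC first.
+cdef inline int pydatetime_to_struct(object dt, npy_datetimestruct *out) except -1:
+    cdef NPY_DATETIMEUNIT bestunit
+    return convert_pydatetime_to_datetimestruct(<PyObject*>dt, out, &bestunit, 1)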
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+ ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+ ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+ cdef:
+ int nin, nout, nargs
+ int identity
+ PyUFuncGenericFunction *functions
+ void **data
+ int ntypes
+ int check_return
+ char *name
+ char *types
+ char *doc
+ void *ptr
+ PyObject *obj
+ PyObject *userloops
+
+ cdef enum:
+ PyUFunc_Zero
+ PyUFunc_One
+ PyUFunc_None
+ # deprecated
+ UFUNC_FPE_DIVIDEBYZERO
+ UFUNC_FPE_OVERFLOW
+ UFUNC_FPE_UNDERFLOW
+ UFUNC_FPE_INVALID
+ # use these instead
+ NPY_FPE_DIVIDEBYZERO
+ NPY_FPE_OVERFLOW
+ NPY_FPE_UNDERFLOW
+ NPY_FPE_INVALID
+
+
+ object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+ void **, char *, int, int, int, int, char *, char *, int)
+ int PyUFunc_RegisterLoopForType(ufunc, int,
+ PyUFuncGenericFunction, int *, void *) except -1
+ void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_f_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_g_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_G_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_gg_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_GG_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_On_Om \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_clearfperr()
+ int PyUFunc_getfperr()
+ int PyUFunc_ReplaceLoopBySignature \
+ (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+ object PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int,
+ int, char *, char *, int, char *)
+
+ int _import_umath() except -1
+
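+# Illustrative pattern (kept as a comment because a .pxd holds shared
+# declarations only): registering a one-in/one-out float64 ufunc from a .pyx
+# module with PyUFunc_FromFuncAndData. All names below are hypothetical.
+#
+#     cdef void double_it(char **args, npy_intp *dims, npy_intp *steps,
+#                         void *data) noexcept:
+#         cdef npy_intp i
+#         for i in range(dims[0]):
+#             (<double*>(args[1] + i*steps[1]))[0] = \
+#                 2.0 * (<double*>(args[0] + i*steps[0]))[0]
+#
+#     cdef PyUFuncGenericFunction funcs[1]
+#     cdef char types[2]
+#     funcs[0] = double_it
+#     types[0] = NPY_FLOAT64; types[1] = NPY_FLOAT64
+#     double_ufunc = PyUFunc_FromFuncAndData(
+#         funcs, NULL, types, 1, 1, 1, PyUFunc_None,
+#         b"double_it", b"Doubles the input element-wise.", 0)
+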
+cdef inline void set_array_base(ndarray arr, object base) except *:
+ Py_INCREF(base) # important to do this before stealing the reference below!
+ PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+    base = PyArray_BASE(arr)
+    if base is NULL:
+        return None
+    return <object>base
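+
+# Illustrative sketch (hypothetical helper): the canonical use of
+# set_array_base() is tying the lifetime of externally owned memory to a
+# no-copy array created with PyArray_SimpleNewFromData.
+cdef inline ndarray wrap_float64_buffer(object owner, void* buf, npy_intp n):
+    # Create a 1-D float64 view over `buf` without copying.
+    cdef ndarray arr = PyArray_SimpleNewFromData(1, &n, NPY_FLOAT64, buf)
+    # set_array_base INCREFs `owner` before PyArray_SetBaseObject steals the
+    # reference, so `owner` stays alive as long as `arr` does.
+    set_array_base(arr, owner)
+    return arr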
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+ try:
+ __pyx_import_array()
+ except Exception:
+ raise ImportError("numpy._core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy._core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy._core.umath failed to import")
+
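+# Typical usage of the wrappers above (illustrative): call import_array()
+# once at module import time in any .pyx module that uses PyArray_*
+# functions, e.g.:
+#
+#     cimport numpy as cnp
+#     cnp.import_array()  # must run before the first PyArray_* call
+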
+
+cdef inline bint is_timedelta64_object(object obj) noexcept:
+ """
+ Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+ Parameters
+ ----------
+ obj : object
+
+ Returns
+ -------
+ bool
+ """
+ return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj) noexcept:
+ """
+ Cython equivalent of `isinstance(obj, np.datetime64)`
+
+ Parameters
+ ----------
+ obj : object
+
+ Returns
+ -------
+ bool
+ """
+ return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil:
+    """
+    Returns the int64 value underlying a scalar numpy datetime64 object.
+
+    Note that to interpret this as a datetime, the corresponding unit is
+    also needed. That can be found using `get_datetime64_unit`.
+    """
+    return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil:
+    """
+    Returns the int64 value underlying a scalar numpy timedelta64 object.
+    """
+    return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil:
+    """
+    Returns the unit part of the dtype for a numpy datetime64 object.
+    """
+    return (<PyDatetimeScalarObject*>obj).obmeta.base
+
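+# Illustrative use of the scalar helpers above (hypothetical name): split a
+# np.datetime64 scalar into its raw int64 payload and its unit code.
+cdef inline tuple describe_datetime64(object obj):
+    if not is_datetime64_object(obj):
+        raise TypeError("expected a numpy.datetime64 scalar")
+    return (get_datetime64_value(obj), <int>get_datetime64_unit(obj))
+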
+
+cdef extern from "numpy/arrayobject.h":
+
+ ctypedef struct NpyIter:
+ pass
+
+ cdef enum:
+ NPY_FAIL
+ NPY_SUCCEED
+
+ cdef enum:
+ # Track an index representing C order
+ NPY_ITER_C_INDEX
+ # Track an index representing Fortran order
+ NPY_ITER_F_INDEX
+ # Track a multi-index
+ NPY_ITER_MULTI_INDEX
+ # User code external to the iterator does the 1-dimensional innermost loop
+ NPY_ITER_EXTERNAL_LOOP
+ # Convert all the operands to a common data type
+ NPY_ITER_COMMON_DTYPE
+ # Operands may hold references, requiring API access during iteration
+ NPY_ITER_REFS_OK
+ # Zero-sized operands should be permitted, iteration checks IterSize for 0
+ NPY_ITER_ZEROSIZE_OK
+ # Permits reductions (size-0 stride with dimension size > 1)
+ NPY_ITER_REDUCE_OK
+ # Enables sub-range iteration
+ NPY_ITER_RANGED
+ # Enables buffering
+ NPY_ITER_BUFFERED
+ # When buffering is enabled, grows the inner loop if possible
+ NPY_ITER_GROWINNER
+ # Delay allocation of buffers until first Reset* call
+ NPY_ITER_DELAY_BUFALLOC
+ # When NPY_KEEPORDER is specified, disable reversing negative-stride axes
+ NPY_ITER_DONT_NEGATE_STRIDES
+ NPY_ITER_COPY_IF_OVERLAP
+ # The operand will be read from and written to
+ NPY_ITER_READWRITE
+ # The operand will only be read from
+ NPY_ITER_READONLY
+ # The operand will only be written to
+ NPY_ITER_WRITEONLY
+ # The operand's data must be in native byte order
+ NPY_ITER_NBO
+ # The operand's data must be aligned
+ NPY_ITER_ALIGNED
+ # The operand's data must be contiguous (within the inner loop)
+ NPY_ITER_CONTIG
+ # The operand may be copied to satisfy requirements
+ NPY_ITER_COPY
+ # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements
+ NPY_ITER_UPDATEIFCOPY
+ # Allocate the operand if it is NULL
+ NPY_ITER_ALLOCATE
+ # If an operand is allocated, don't use any subtype
+ NPY_ITER_NO_SUBTYPE
+ # This is a virtual array slot, operand is NULL but temporary data is there
+ NPY_ITER_VIRTUAL
+ # Require that the dimension match the iterator dimensions exactly
+ NPY_ITER_NO_BROADCAST
+ # A mask is being used on this array, affects buffer -> array copy
+ NPY_ITER_WRITEMASKED
+ # This array is the mask for all WRITEMASKED operands
+ NPY_ITER_ARRAYMASK
+ # Assume iterator order data access for COPY_IF_OVERLAP
+ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
+
+ # construction and destruction functions
+ NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order,
+ NPY_CASTING casting, dtype datatype) except NULL
+ NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags,
+ NPY_ORDER order, NPY_CASTING casting, npy_uint32*
+ op_flags, PyArray_Descr** op_dtypes) except NULL
+ NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op,
+ npy_uint32 flags, NPY_ORDER order,
+ NPY_CASTING casting, npy_uint32* op_flags,
+ PyArray_Descr** op_dtypes, int oa_ndim,
+ int** op_axes, const npy_intp* itershape,
+ npy_intp buffersize) except NULL
+ NpyIter* NpyIter_Copy(NpyIter* it) except NULL
+ int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL
+ int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL
+ int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL
+ int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL
+ int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL
+ int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart,
+ npy_intp iend, char** errmsg) except NPY_FAIL
+ int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL
+ int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL
+ int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL
+ npy_intp NpyIter_GetIterSize(NpyIter* it) nogil
+ npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil
+ void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart,
+ npy_intp* iend) nogil
+ int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL
+ npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil
+ npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil
+ npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil
+ npy_bool NpyIter_HasIndex(NpyIter* it) nogil
+ npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil
+ npy_bool NpyIter_IsBuffered(NpyIter* it) nogil
+ npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil
+ npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil
+ int NpyIter_GetNDim(NpyIter* it) nogil
+ int NpyIter_GetNOp(NpyIter* it) nogil
+ npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL
+ int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil
+ PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it)
+ PyArrayObject** NpyIter_GetOperandArray(NpyIter* it)
+ ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i)
+ void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags)
+ void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags)
+ int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize,
+ npy_intp* outstrides) except NPY_FAIL
+ npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil
+ # functions for iterating an NpyIter object
+ #
+ # These don't match the definition in the C API because Cython can't wrap
+ # function pointers that return functions.
+ NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL
+ NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it,
+ char** errmsg) except NULL
+ char** NpyIter_GetDataPtrArray(NpyIter* it) nogil
+ char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil
+ npy_intp* NpyIter_GetIndexPtr(NpyIter* it)
+ npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil
+ npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil
+ void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil
+ npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil
+ void NpyIter_DebugPrint(NpyIter* it)
+
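+# Illustrative sketch (hypothetical helper) of the standard NpyIter pattern:
+# create the iterator, fetch the iternext function and the data-pointer
+# array, then run a do/while loop. Assumes a float64 operand and abbreviates
+# error handling; without NPY_ITER_EXTERNAL_LOOP, each iternext() step
+# visits exactly one element.
+cdef inline double npyiter_sum_float64(ndarray arr) except? -1.0:
+    cdef PyArrayObject* ops[1]
+    cdef npy_uint32 op_flags[1]
+    cdef NpyIter* it
+    cdef NpyIter_IterNextFunc iternext
+    cdef char** dataptr
+    cdef double total = 0.0
+    ops[0] = <PyArrayObject*>arr
+    op_flags[0] = NPY_ITER_READONLY
+    it = NpyIter_MultiNew(1, ops, 0, NPY_KEEPORDER, NPY_NO_CASTING,
+                          op_flags, NULL)
+    try:
+        iternext = NpyIter_GetIterNext(it, NULL)
+        dataptr = NpyIter_GetDataPtrArray(it)
+        while True:
+            total += (<double*>dataptr[0])[0]
+            if not iternext(it):
+                break
+    finally:
+        NpyIter_Deallocate(it)
+    return total
+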
+# NpyString API
+cdef extern from "numpy/ndarraytypes.h":
+ ctypedef struct npy_string_allocator:
+ pass
+
+ ctypedef struct npy_packed_static_string:
+ pass
+
+ ctypedef struct npy_static_string:
+ size_t size
+ const char *buf
+
+ ctypedef struct PyArray_StringDTypeObject:
+ PyArray_Descr base
+ PyObject *na_object
+ char coerce
+ char has_nan_na
+ char has_string_na
+ char array_owned
+ npy_static_string default_string
+ npy_static_string na_name
+ npy_string_allocator *allocator
+
+cdef extern from "numpy/arrayobject.h":
+ npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr)
+ void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[])
+ void NpyString_release_allocator(npy_string_allocator *allocator)
+ void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[])
+ int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string)
+ int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string)
+ int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size)
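+
+# Illustrative sketch (hypothetical helper) of the allocator discipline for
+# the NpyString API above: acquire the allocator, load the packed string,
+# consume `out` while the allocator is still held, then release it.
+cdef inline int load_static_string(PyArray_StringDTypeObject *descr,
+                                   const npy_packed_static_string *packed,
+                                   npy_static_string *out) except -1:
+    cdef npy_string_allocator *alloc = NpyString_acquire_allocator(descr)
+    cdef int is_null
+    try:
+        # NpyString_load returns -1 on error, 1 for the null string, and 0
+        # on success; `out.buf`/`out.size` are only valid on success.
+        is_null = NpyString_load(alloc, packed, out)
+        if is_null < 0:
+            raise MemoryError("NpyString_load failed")
+        return is_null
+    finally:
+        NpyString_release_allocator(alloc)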
diff --git a/.venv/lib/python3.12/site-packages/numpy/__init__.pxd b/.venv/lib/python3.12/site-packages/numpy/__init__.pxd
new file mode 100644
index 00000000..eb076412
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/__init__.pxd
@@ -0,0 +1,1154 @@
+# NumPy static imports for Cython < 3.0
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first.
+#
+# Author: Dag Sverre Seljebotn
+#
+
+DEF _buffer_format_string_len = 255
+
+cimport cpython.buffer as pybuf
+from cpython.ref cimport Py_INCREF
+from cpython.mem cimport PyObject_Malloc, PyObject_Free
+from cpython.object cimport PyObject, PyTypeObject
+from cpython.buffer cimport PyObject_GetBuffer
+from cpython.type cimport type
+cimport libc.stdio as stdio
+
+
+cdef extern from *:
+ # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython.
+ # See https://github.com/cython/cython/issues/3573
+ """
+ /* Using NumPy API declarations from "numpy/__init__.pxd" */
+ """
+
+
+cdef extern from "Python.h":
+ ctypedef int Py_intptr_t
+ bint PyObject_TypeCheck(object obj, PyTypeObject* type)
+
+cdef extern from "numpy/arrayobject.h":
+ # It would be nice to use size_t and ssize_t, but ssize_t has special
+ # implicit conversion rules, so just use "long".
+ # Note: The actual type only matters for Cython promotion, so long
+ # is closer than int, but could lead to incorrect promotion.
+    # (Not too worrying, and this has always been the status quo.)
+ ctypedef signed long npy_intp
+ ctypedef unsigned long npy_uintp
+
+ ctypedef unsigned char npy_bool
+
+ ctypedef signed char npy_byte
+ ctypedef signed short npy_short
+ ctypedef signed int npy_int
+ ctypedef signed long npy_long
+ ctypedef signed long long npy_longlong
+
+ ctypedef unsigned char npy_ubyte
+ ctypedef unsigned short npy_ushort
+ ctypedef unsigned int npy_uint
+ ctypedef unsigned long npy_ulong
+ ctypedef unsigned long long npy_ulonglong
+
+ ctypedef float npy_float
+ ctypedef double npy_double
+ ctypedef long double npy_longdouble
+
+ ctypedef signed char npy_int8
+ ctypedef signed short npy_int16
+ ctypedef signed int npy_int32
+ ctypedef signed long long npy_int64
+
+ ctypedef unsigned char npy_uint8
+ ctypedef unsigned short npy_uint16
+ ctypedef unsigned int npy_uint32
+ ctypedef unsigned long long npy_uint64
+
+ ctypedef float npy_float32
+ ctypedef double npy_float64
+ ctypedef long double npy_float80
+ ctypedef long double npy_float96
+ ctypedef long double npy_float128
+
+ ctypedef struct npy_cfloat:
+ pass
+
+ ctypedef struct npy_cdouble:
+ pass
+
+ ctypedef struct npy_clongdouble:
+ pass
+
+ ctypedef struct npy_complex64:
+ pass
+
+ ctypedef struct npy_complex128:
+ pass
+
+ ctypedef struct npy_complex160:
+ pass
+
+ ctypedef struct npy_complex192:
+ pass
+
+ ctypedef struct npy_complex256:
+ pass
+
+ ctypedef struct PyArray_Dims:
+ npy_intp *ptr
+ int len
+
+
+ cdef enum NPY_TYPES:
+ NPY_BOOL
+ NPY_BYTE
+ NPY_UBYTE
+ NPY_SHORT
+ NPY_USHORT
+ NPY_INT
+ NPY_UINT
+ NPY_LONG
+ NPY_ULONG
+ NPY_LONGLONG
+ NPY_ULONGLONG
+ NPY_FLOAT
+ NPY_DOUBLE
+ NPY_LONGDOUBLE
+ NPY_CFLOAT
+ NPY_CDOUBLE
+ NPY_CLONGDOUBLE
+ NPY_OBJECT
+ NPY_STRING
+ NPY_UNICODE
+ NPY_VSTRING
+ NPY_VOID
+ NPY_DATETIME
+ NPY_TIMEDELTA
+ NPY_NTYPES_LEGACY
+ NPY_NOTYPE
+
+ NPY_INT8
+ NPY_INT16
+ NPY_INT32
+ NPY_INT64
+ NPY_UINT8
+ NPY_UINT16
+ NPY_UINT32
+ NPY_UINT64
+ NPY_FLOAT16
+ NPY_FLOAT32
+ NPY_FLOAT64
+ NPY_FLOAT80
+ NPY_FLOAT96
+ NPY_FLOAT128
+ NPY_COMPLEX64
+ NPY_COMPLEX128
+ NPY_COMPLEX160
+ NPY_COMPLEX192
+ NPY_COMPLEX256
+
+ NPY_INTP
+ NPY_UINTP
+ NPY_DEFAULT_INT # Not a compile time constant (normally)!
+
+ ctypedef enum NPY_ORDER:
+ NPY_ANYORDER
+ NPY_CORDER
+ NPY_FORTRANORDER
+ NPY_KEEPORDER
+
+ ctypedef enum NPY_CASTING:
+ NPY_NO_CASTING
+ NPY_EQUIV_CASTING
+ NPY_SAFE_CASTING
+ NPY_SAME_KIND_CASTING
+ NPY_UNSAFE_CASTING
+
+ ctypedef enum NPY_CLIPMODE:
+ NPY_CLIP
+ NPY_WRAP
+ NPY_RAISE
+
+ ctypedef enum NPY_SCALARKIND:
+ NPY_NOSCALAR,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+
+ ctypedef enum NPY_SORTKIND:
+ NPY_QUICKSORT
+ NPY_HEAPSORT
+ NPY_MERGESORT
+
+ ctypedef enum NPY_SEARCHSIDE:
+ NPY_SEARCHLEFT
+ NPY_SEARCHRIGHT
+
+ enum:
+ NPY_ARRAY_C_CONTIGUOUS
+ NPY_ARRAY_F_CONTIGUOUS
+ NPY_ARRAY_OWNDATA
+ NPY_ARRAY_FORCECAST
+ NPY_ARRAY_ENSURECOPY
+ NPY_ARRAY_ENSUREARRAY
+ NPY_ARRAY_ELEMENTSTRIDES
+ NPY_ARRAY_ALIGNED
+ NPY_ARRAY_NOTSWAPPED
+ NPY_ARRAY_WRITEABLE
+ NPY_ARRAY_WRITEBACKIFCOPY
+
+ NPY_ARRAY_BEHAVED
+ NPY_ARRAY_BEHAVED_NS
+ NPY_ARRAY_CARRAY
+ NPY_ARRAY_CARRAY_RO
+ NPY_ARRAY_FARRAY
+ NPY_ARRAY_FARRAY_RO
+ NPY_ARRAY_DEFAULT
+
+ NPY_ARRAY_IN_ARRAY
+ NPY_ARRAY_OUT_ARRAY
+ NPY_ARRAY_INOUT_ARRAY
+ NPY_ARRAY_IN_FARRAY
+ NPY_ARRAY_OUT_FARRAY
+ NPY_ARRAY_INOUT_FARRAY
+
+ NPY_ARRAY_UPDATE_ALL
+
+ cdef enum:
+ NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x
+ NPY_RAVEL_AXIS # Used for functions like PyArray_Mean
+
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
+
+ ctypedef struct PyArray_ArrayDescr:
+ # shape is a tuple, but Cython doesn't support "tuple shape"
+ # inside a non-PyObject declaration, so we have to declare it
+ # as just a PyObject*.
+ PyObject* shape
+
+ ctypedef struct PyArray_Descr:
+ pass
+
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
+ # Use PyDataType_* macros when possible, however there are no macros
+ # for accessing some of the fields, so some are defined.
+ cdef PyTypeObject* typeobj
+ cdef char kind
+ cdef char type
+ # Numpy sometimes mutates this without warning (e.g. it'll
+ # sometimes change "|" to "<" in shared dtype objects on
+ # little-endian machines). If this matters to you, use
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
+ # directly accessing this field.
+ cdef char byteorder
+ # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS.
+ # cdef char flags
+ cdef int type_num
+ # itemsize/elsize, alignment, fields, names, and subarray must
+    # use the `PyDataType_*` accessor macros. With Cython 3 you can
+    # still use the getter attributes such as `dtype.itemsize`.
+
+ ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
+ # Use through macros
+ pass
+
+ ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
+ cdef int numiter
+ cdef npy_intp size, index
+ cdef int nd
+ cdef npy_intp *dimensions
+ cdef void **iters
+
+ ctypedef struct PyArrayObject:
+ # For use in situations where ndarray can't replace PyArrayObject*,
+ # like PyArrayObject**.
+ pass
+
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
+
+ cdef:
+ # Only taking a few of the most commonly used and stable fields.
+ # One should use PyArray_* macros instead to access the C fields.
+ char *data
+ int ndim "nd"
+ npy_intp *shape "dimensions"
+ npy_intp *strides
+ dtype descr # deprecated since NumPy 1.7 !
+ PyObject* base # NOT PUBLIC, DO NOT USE !
+
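+    # Illustrative note: prefer the PyArray_* macros over the legacy fields
+    # above, e.g. in a .pyx file (variable names hypothetical):
+    #
+    #     cdef double* p = <double*>PyArray_DATA(arr)   # not arr.data
+    #     cdef npy_intp n = PyArray_DIM(arr, 0)         # not arr.shape[0]
+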
+
+ int _import_array() except -1
+ # A second definition so _import_array isn't marked as used when we use it here.
+ # Do not use - subject to change any time.
+ int __pyx_import_array "_import_array"() except -1
+
+ #
+ # Macros from ndarrayobject.h
+ #
+ bint PyArray_CHKFLAGS(ndarray m, int flags) nogil
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_ISCONTIGUOUS(ndarray m) nogil
+ bint PyArray_ISWRITEABLE(ndarray m) nogil
+ bint PyArray_ISALIGNED(ndarray m) nogil
+
+ int PyArray_NDIM(ndarray) nogil
+ bint PyArray_ISONESEGMENT(ndarray) nogil
+ bint PyArray_ISFORTRAN(ndarray) nogil
+ int PyArray_FORTRANIF(ndarray) nogil
+
+ void* PyArray_DATA(ndarray) nogil
+ char* PyArray_BYTES(ndarray) nogil
+
+ npy_intp* PyArray_DIMS(ndarray) nogil
+ npy_intp* PyArray_STRIDES(ndarray) nogil
+ npy_intp PyArray_DIM(ndarray, size_t) nogil
+ npy_intp PyArray_STRIDE(ndarray, size_t) nogil
+
+ PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference!
+ PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype!
+ PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr.
+ int PyArray_FLAGS(ndarray) nogil
+ void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
+ void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
+ npy_intp PyArray_ITEMSIZE(ndarray) nogil
+ int PyArray_TYPE(ndarray arr) nogil
+
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1
+
+ bint PyTypeNum_ISBOOL(int) nogil
+ bint PyTypeNum_ISUNSIGNED(int) nogil
+ bint PyTypeNum_ISSIGNED(int) nogil
+ bint PyTypeNum_ISINTEGER(int) nogil
+ bint PyTypeNum_ISFLOAT(int) nogil
+ bint PyTypeNum_ISNUMBER(int) nogil
+ bint PyTypeNum_ISSTRING(int) nogil
+ bint PyTypeNum_ISCOMPLEX(int) nogil
+ bint PyTypeNum_ISFLEXIBLE(int) nogil
+ bint PyTypeNum_ISUSERDEF(int) nogil
+ bint PyTypeNum_ISEXTENDED(int) nogil
+ bint PyTypeNum_ISOBJECT(int) nogil
+
+ npy_intp PyDataType_ELSIZE(dtype) nogil
+ npy_intp PyDataType_ALIGNMENT(dtype) nogil
+ PyObject* PyDataType_METADATA(dtype) nogil
+ PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil
+ PyObject* PyDataType_NAMES(dtype) nogil
+ PyObject* PyDataType_FIELDS(dtype) nogil
+
+ bint PyDataType_ISBOOL(dtype) nogil
+ bint PyDataType_ISUNSIGNED(dtype) nogil
+ bint PyDataType_ISSIGNED(dtype) nogil
+ bint PyDataType_ISINTEGER(dtype) nogil
+ bint PyDataType_ISFLOAT(dtype) nogil
+ bint PyDataType_ISNUMBER(dtype) nogil
+ bint PyDataType_ISSTRING(dtype) nogil
+ bint PyDataType_ISCOMPLEX(dtype) nogil
+ bint PyDataType_ISFLEXIBLE(dtype) nogil
+ bint PyDataType_ISUSERDEF(dtype) nogil
+ bint PyDataType_ISEXTENDED(dtype) nogil
+ bint PyDataType_ISOBJECT(dtype) nogil
+ bint PyDataType_HASFIELDS(dtype) nogil
+ bint PyDataType_HASSUBARRAY(dtype) nogil
+ npy_uint64 PyDataType_FLAGS(dtype) nogil
+
+ bint PyArray_ISBOOL(ndarray) nogil
+ bint PyArray_ISUNSIGNED(ndarray) nogil
+ bint PyArray_ISSIGNED(ndarray) nogil
+ bint PyArray_ISINTEGER(ndarray) nogil
+ bint PyArray_ISFLOAT(ndarray) nogil
+ bint PyArray_ISNUMBER(ndarray) nogil
+ bint PyArray_ISSTRING(ndarray) nogil
+ bint PyArray_ISCOMPLEX(ndarray) nogil
+ bint PyArray_ISFLEXIBLE(ndarray) nogil
+ bint PyArray_ISUSERDEF(ndarray) nogil
+ bint PyArray_ISEXTENDED(ndarray) nogil
+ bint PyArray_ISOBJECT(ndarray) nogil
+ bint PyArray_HASFIELDS(ndarray) nogil
+
+ bint PyArray_ISVARIABLE(ndarray) nogil
+
+ bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil
+ bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder
+ bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder
+ bint PyArray_ISNOTSWAPPED(ndarray) nogil
+ bint PyArray_ISBYTESWAPPED(ndarray) nogil
+
+ bint PyArray_FLAGSWAP(ndarray, int) nogil
+
+ bint PyArray_ISCARRAY(ndarray) nogil
+ bint PyArray_ISCARRAY_RO(ndarray) nogil
+ bint PyArray_ISFARRAY(ndarray) nogil
+ bint PyArray_ISFARRAY_RO(ndarray) nogil
+ bint PyArray_ISBEHAVED(ndarray) nogil
+ bint PyArray_ISBEHAVED_RO(ndarray) nogil
+
+
+ bint PyDataType_ISNOTSWAPPED(dtype) nogil
+ bint PyDataType_ISBYTESWAPPED(dtype) nogil
+
+ bint PyArray_DescrCheck(object)
+
+ bint PyArray_Check(object)
+ bint PyArray_CheckExact(object)
+
+ # Cannot be supported due to out arg:
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
+ # bint PyArray_HasArrayInterface(op, out)
+
+
+ bint PyArray_IsZeroDim(object)
+ # Cannot be supported due to ## ## in macro:
+ # bint PyArray_IsScalar(object, verbatim work)
+ bint PyArray_CheckScalar(object)
+ bint PyArray_IsPythonNumber(object)
+ bint PyArray_IsPythonScalar(object)
+ bint PyArray_IsAnyScalar(object)
+ bint PyArray_CheckAnyScalar(object)
+
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
+ bint PyArray_SAMESHAPE(ndarray, ndarray) nogil
+ npy_intp PyArray_SIZE(ndarray) nogil
+ npy_intp PyArray_NBYTES(ndarray) nogil
+
+ object PyArray_FROM_O(object)
+ object PyArray_FROM_OF(object m, int flags)
+ object PyArray_FROM_OT(object m, int type)
+ object PyArray_FROM_OTF(object m, int type, int flags)
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
+ void PyArray_FILLWBYTE(ndarray, int val)
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
+ bint PyArray_EquivByteorders(int b1, int b2) nogil
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
+ object PyArray_ToScalar(void* data, ndarray arr)
+
+ void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
+
+ # Cannot be supported due to out arg
+ # void PyArray_DESCR_REPLACE(descr)
+
+
+ object PyArray_Copy(ndarray)
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
+
+ object PyArray_Cast(ndarray mp, int type_num)
+ object PyArray_Take(ndarray ap, object items, int axis)
+ object PyArray_Put(ndarray ap, object items, object values)
+
+ void PyArray_ITER_RESET(flatiter it) nogil
+ void PyArray_ITER_NEXT(flatiter it) nogil
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
+
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
+ npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil
+ int PyArray_MultiIter_NDIM(broadcast multi) nogil
+ npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil
+ int PyArray_MultiIter_NUMITER(broadcast multi) nogil
+ npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil
+ void** PyArray_MultiIter_ITERS(broadcast multi) nogil
+
+ # Functions from __multiarray_api.h
+
+ # Functions taking dtype and returning object/ndarray are disabled
+ # for now as they steal dtype references. I'm conservative and disable
+ # more than is probably needed until it can be checked further.
+ int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF...
+ int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF...
+ dtype PyArray_DescrFromType (int)
+ object PyArray_TypeObjectFromType (int)
+ char * PyArray_Zero (ndarray)
+ char * PyArray_One (ndarray)
+ #object PyArray_CastToType (ndarray, dtype, int)
+ int PyArray_CanCastSafely (int, int) # writes errors
+ npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors
+ int PyArray_ObjectType (object, int) except 0
+ dtype PyArray_DescrFromObject (object, dtype)
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
+ dtype PyArray_DescrFromScalar (object)
+ dtype PyArray_DescrFromTypeObject (object)
+ npy_intp PyArray_Size (object)
+ #object PyArray_Scalar (void *, dtype, object)
+ #object PyArray_FromScalar (object, dtype)
+ void PyArray_ScalarAsCtype (object, void *)
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
+ object PyArray_EnsureArray (object)
+ object PyArray_EnsureAnyArray (object)
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
+ #object PyArray_FromIter (object, dtype, npy_intp)
+ object PyArray_Return (ndarray)
+ #object PyArray_GetField (ndarray, dtype, int)
+ #int PyArray_SetField (ndarray, dtype, int, object) except -1
+ object PyArray_Byteswap (ndarray, npy_bool)
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
+ int PyArray_CopyInto (ndarray, ndarray) except -1
+ int PyArray_CopyAnyInto (ndarray, ndarray) except -1
+ int PyArray_CopyObject (ndarray, object) except -1
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
+ object PyArray_ToList (ndarray)
+ object PyArray_ToString (ndarray, NPY_ORDER)
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1
+ int PyArray_Dump (object, object, int) except -1
+ object PyArray_Dumps (object, int)
+ int PyArray_ValidType (int) # Cannot error
+ void PyArray_UpdateFlags (ndarray, int)
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
+ #dtype PyArray_DescrNew (dtype)
+ dtype PyArray_DescrNewFromType (int)
+ double PyArray_GetPriority (object, double) # clears errors as of 1.25
+ object PyArray_IterNew (object)
+ object PyArray_MultiIterNew (int, ...)
+
+ int PyArray_PyIntAsInt (object) except? -1
+ npy_intp PyArray_PyIntAsIntp (object)
+ int PyArray_Broadcast (broadcast) except -1
+ int PyArray_FillWithScalar (ndarray, object) except -1
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
+ dtype PyArray_DescrNewByteorder (dtype, char)
+ object PyArray_IterAllButAxis (object, int *)
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
+ #object PyArray_FromArray (ndarray, dtype, int)
+ object PyArray_FromInterface (object)
+ object PyArray_FromStructInterface (object)
+ #object PyArray_FromArrayAttr (object, dtype, object)
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
+ npy_bool PyArray_CanCastScalar (type, type)
+ int PyArray_RemoveSmallest (broadcast) except -1
+ int PyArray_ElementStrides (object)
+ void PyArray_Item_INCREF (char *, dtype) except *
+ void PyArray_Item_XDECREF (char *, dtype) except *
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
+ object PyArray_PutMask (ndarray, object, object)
+ object PyArray_Repeat (ndarray, object, int)
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *)
+ object PyArray_ArgMax (ndarray, int, ndarray)
+ object PyArray_ArgMin (ndarray, int, ndarray)
+ object PyArray_Reshape (ndarray, object)
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
+ object PyArray_Squeeze (ndarray)
+ #object PyArray_View (ndarray, dtype, type)
+ object PyArray_SwapAxes (ndarray, int, int)
+ object PyArray_Max (ndarray, int, ndarray)
+ object PyArray_Min (ndarray, int, ndarray)
+ object PyArray_Ptp (ndarray, int, ndarray)
+ object PyArray_Mean (ndarray, int, int, ndarray)
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
+ object PyArray_Diagonal (ndarray, int, int, int)
+ object PyArray_Clip (ndarray, object, object, ndarray)
+ object PyArray_Conjugate (ndarray, ndarray)
+ object PyArray_Nonzero (ndarray)
+ object PyArray_Std (ndarray, int, int, ndarray, int)
+ object PyArray_Sum (ndarray, int, int, ndarray)
+ object PyArray_CumSum (ndarray, int, int, ndarray)
+ object PyArray_Prod (ndarray, int, int, ndarray)
+ object PyArray_CumProd (ndarray, int, int, ndarray)
+ object PyArray_All (ndarray, int, ndarray)
+ object PyArray_Any (ndarray, int, ndarray)
+ object PyArray_Compress (ndarray, object, int, ndarray)
+ object PyArray_Flatten (ndarray, NPY_ORDER)
+ object PyArray_Ravel (ndarray, NPY_ORDER)
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
+ int PyArray_MultiplyIntList (int *, int)
+ void * PyArray_GetPtr (ndarray, npy_intp*)
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
+ int PyArray_Free (object, void *)
+ #int PyArray_Converter (object, object*)
+ int PyArray_IntpFromSequence (object, npy_intp *, int) except -1
+ object PyArray_Concatenate (object, int)
+ object PyArray_InnerProduct (object, object)
+ object PyArray_MatrixProduct (object, object)
+ object PyArray_Correlate (object, object, int)
+ #int PyArray_DescrConverter (object, dtype*) except 0
+ #int PyArray_DescrConverter2 (object, dtype*) except 0
+ int PyArray_IntpConverter (object, PyArray_Dims *) except 0
+ #int PyArray_BufferConverter (object, chunk) except 0
+ int PyArray_AxisConverter (object, int *) except 0
+ int PyArray_BoolConverter (object, npy_bool *) except 0
+ int PyArray_ByteorderConverter (object, char *) except 0
+ int PyArray_OrderConverter (object, NPY_ORDER *) except 0
+ unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
+ object PyArray_Where (object, object, object)
+ object PyArray_Arange (double, double, double, int)
+ #object PyArray_ArangeObj (object, object, object, dtype)
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0
+ object PyArray_LexSort (object, int)
+ object PyArray_Round (ndarray, int, ndarray)
+ unsigned char PyArray_EquivTypenums (int, int)
+ int PyArray_RegisterDataType (dtype) except -1
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0
+ #int PyArray_OutputConverter (object, ndarray*) except 0
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
+ #int PyArray_DescrAlignConverter (object, dtype*) except 0
+ #int PyArray_DescrAlignConverter2 (object, dtype*) except 0
+ int PyArray_SearchsideConverter (object, void *) except 0
+ object PyArray_CheckAxis (ndarray, int *, int)
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+ int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+ # The memory handler functions require the NumPy 1.22 API
+ # and may require defining NPY_TARGET_VERSION
+ ctypedef struct PyDataMemAllocator:
+ void *ctx
+ void* (*malloc) (void *ctx, size_t size)
+ void* (*calloc) (void *ctx, size_t nelem, size_t elsize)
+ void* (*realloc) (void *ctx, void *ptr, size_t new_size)
+ void (*free) (void *ctx, void *ptr, size_t size)
+
+ ctypedef struct PyDataMem_Handler:
+ char* name
+ npy_uint8 version
+ PyDataMemAllocator allocator
+
+ object PyDataMem_SetHandler(object handler)
+ object PyDataMem_GetHandler()
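+
+ # A minimal usage sketch (illustrative, not part of the declarations
+ # above): both functions exchange PyCapsule objects wrapping a
+ # PyDataMem_Handler, so a caller can save and later restore the active
+ # allocator:
+ #
+ #     saved = PyDataMem_GetHandler()   # capsule for the current handler
+ #     ...                              # install a custom handler capsule
+ #     PyDataMem_SetHandler(saved)      # restore; returns the old handler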
+
+ # additional datetime related functions are defined below
+
+
+# Typedefs that match the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out need an IFDEF function
+# in Cython to enable them only on the right systems.
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+ctypedef npy_longlong longlong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef float complex cfloat_t
+ctypedef double complex cdouble_t
+ctypedef double complex complex_t
+ctypedef long double complex clongdouble_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+ return PyArray_MultiIterNew(1, a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+ return PyArray_MultiIterNew(2, a, b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+ return PyArray_MultiIterNew(3, a, b, c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ return PyArray_MultiIterNew(4, a, b, c, d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ return PyArray_MultiIterNew(5, a, b, c, d, e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+ if PyDataType_HASSUBARRAY(d):
+ return d.subarray.shape
+ else:
+ return ()
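+
+# For example (illustrative): PyDataType_SHAPE(np.dtype("(2,3)float64"))
+# returns (2, 3), while any scalar dtype yields ().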
+
+
+cdef extern from "numpy/ndarrayobject.h":
+ PyTypeObject PyTimedeltaArrType_Type
+ PyTypeObject PyDatetimeArrType_Type
+ ctypedef int64_t npy_timedelta
+ ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+ ctypedef struct PyArray_DatetimeMetaData:
+ NPY_DATETIMEUNIT base
+ int64_t num
+
+ ctypedef struct npy_datetimestruct:
+ int64_t year
+ int32_t month, day, hour, min, sec, us, ps, as
+
+ # Iterator API added in v1.6
+ #
+ # These don't match the definition in the C API because Cython can't wrap
+ # function pointers that return functions.
+ # https://github.com/cython/cython/issues/6720
+ ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil
+ ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil
+
+cdef extern from "numpy/arrayscalars.h":
+
+ # abstract types
+ ctypedef class numpy.generic [object PyObject]:
+ pass
+ ctypedef class numpy.number [object PyObject]:
+ pass
+ ctypedef class numpy.integer [object PyObject]:
+ pass
+ ctypedef class numpy.signedinteger [object PyObject]:
+ pass
+ ctypedef class numpy.unsignedinteger [object PyObject]:
+ pass
+ ctypedef class numpy.inexact [object PyObject]:
+ pass
+ ctypedef class numpy.floating [object PyObject]:
+ pass
+ ctypedef class numpy.complexfloating [object PyObject]:
+ pass
+ ctypedef class numpy.flexible [object PyObject]:
+ pass
+ ctypedef class numpy.character [object PyObject]:
+ pass
+
+ ctypedef struct PyDatetimeScalarObject:
+ # PyObject_HEAD
+ npy_datetime obval
+ PyArray_DatetimeMetaData obmeta
+
+ ctypedef struct PyTimedeltaScalarObject:
+ # PyObject_HEAD
+ npy_timedelta obval
+ PyArray_DatetimeMetaData obmeta
+
+ ctypedef enum NPY_DATETIMEUNIT:
+ NPY_FR_Y
+ NPY_FR_M
+ NPY_FR_W
+ NPY_FR_D
+ NPY_FR_B
+ NPY_FR_h
+ NPY_FR_m
+ NPY_FR_s
+ NPY_FR_ms
+ NPY_FR_us
+ NPY_FR_ns
+ NPY_FR_ps
+ NPY_FR_fs
+ NPY_FR_as
+ NPY_FR_GENERIC
+
+
+cdef extern from "numpy/arrayobject.h":
+ # These are part of the C-API defined in `__multiarray_api.h`
+
+ # NumPy internal definitions in datetime_strings.c:
+ int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" (
+ int local, NPY_DATETIMEUNIT base)
+ int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" (
+ npy_datetimestruct *dts, char *outstr, npy_intp outlen,
+ int local, int utc, NPY_DATETIMEUNIT base, int tzoffset,
+ NPY_CASTING casting) except -1
+
+ # NumPy internal definition in datetime.c:
+ # May return 1 to indicate that object does not appear to be a datetime
+ # (returns 0 on success).
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" (
+ PyObject *obj, npy_datetimestruct *out,
+ NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1
+ int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" (
+ PyArray_DatetimeMetaData *meta, npy_datetime dt,
+ npy_datetimestruct *out) except -1
+ int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"(
+ PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts,
+ npy_datetime *out) except -1
+
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+ ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+ ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+ cdef:
+ int nin, nout, nargs
+ int identity
+ PyUFuncGenericFunction *functions
+ void **data
+ int ntypes
+ int check_return
+ char *name
+ char *types
+ char *doc
+ void *ptr
+ PyObject *obj
+ PyObject *userloops
+
+ cdef enum:
+ PyUFunc_Zero
+ PyUFunc_One
+ PyUFunc_None
+ # deprecated
+ UFUNC_FPE_DIVIDEBYZERO
+ UFUNC_FPE_OVERFLOW
+ UFUNC_FPE_UNDERFLOW
+ UFUNC_FPE_INVALID
+ # use these instead
+ NPY_FPE_DIVIDEBYZERO
+ NPY_FPE_OVERFLOW
+ NPY_FPE_UNDERFLOW
+ NPY_FPE_INVALID
+
+ object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+ void **, char *, int, int, int, int, char *, char *, int)
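+ # Illustrative sketch (assumes hypothetical module-level `loop_funcs`,
+ # `loop_data` and `type_codes` arrays set up per the C ufunc API); the
+ # arguments are funcs, data, types, ntypes, nin, nout, identity, name,
+ # doc, unused:
+ #
+ #     my_ufunc = PyUFunc_FromFuncAndData(loop_funcs, loop_data,
+ #                                        type_codes, 1, 1, 1,
+ #                                        PyUFunc_None, "my_ufunc",
+ #                                        "elementwise demo", 0)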
+ int PyUFunc_RegisterLoopForType(ufunc, int,
+ PyUFuncGenericFunction, int *, void *) except -1
+ void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_f_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_g_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_G_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_gg_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_GG_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_On_Om \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_clearfperr()
+ int PyUFunc_getfperr()
+ int PyUFunc_ReplaceLoopBySignature \
+ (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+ object PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int,
+ int, char *, char *, int, char *)
+
+ int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+ Py_INCREF(base) # important to do this before stealing the reference below!
+ PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+ base = PyArray_BASE(arr)
+ if base is NULL:
+ return None
+ return <object>base
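+
+# Usage sketch (illustrative; assumes `arr` views memory owned by `owner`):
+#
+#     set_array_base(arr, owner)            # arr now keeps owner alive
+#     assert get_array_base(arr) is owner   # and the base is retrievable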
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+ try:
+ __pyx_import_array()
+ except Exception:
+ raise ImportError("numpy._core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy._core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy._core.umath failed to import")
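+
+# Usage sketch (assuming a user .pyx module that cimports this file):
+#
+#     cimport numpy as cnp
+#     cnp.import_array()   # must run once before any C-API calls above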
+
+
+cdef inline bint is_timedelta64_object(object obj):
+ """
+ Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+ Parameters
+ ----------
+ obj : object
+
+ Returns
+ -------
+ bool
+ """
+ return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj):
+ """
+ Cython equivalent of `isinstance(obj, np.datetime64)`
+
+ Parameters
+ ----------
+ obj : object
+
+ Returns
+ -------
+ bool
+ """
+ return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+ """
+ returns the int64 value underlying a scalar numpy datetime64 object
+
+ Note that to interpret this as a datetime, the corresponding unit is
+ also needed. That can be found using `get_datetime64_unit`.
+ """
+ return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+ """
+ returns the int64 value underlying a scalar numpy timedelta64 object
+ """
+ return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+ """
+ returns the unit part of the dtype for a numpy datetime64 object.
+ """
+ return (<PyDatetimeScalarObject*>obj).obmeta.base
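+
+# Usage sketch (illustrative): for a np.datetime64 scalar `obj`, the raw
+# count and its unit together recover the timestamp:
+#
+#     value = get_datetime64_value(obj)   # int64 count since the epoch
+#     unit = get_datetime64_unit(obj)     # e.g. NPY_FR_ns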
+
+
+cdef extern from "numpy/arrayobject.h":
+
+ ctypedef struct NpyIter:
+ pass
+
+ cdef enum:
+ NPY_FAIL
+ NPY_SUCCEED
+
+ cdef enum:
+ # Track an index representing C order
+ NPY_ITER_C_INDEX
+ # Track an index representing Fortran order
+ NPY_ITER_F_INDEX
+ # Track a multi-index
+ NPY_ITER_MULTI_INDEX
+ # User code external to the iterator does the 1-dimensional innermost loop
+ NPY_ITER_EXTERNAL_LOOP
+ # Convert all the operands to a common data type
+ NPY_ITER_COMMON_DTYPE
+ # Operands may hold references, requiring API access during iteration
+ NPY_ITER_REFS_OK
+ # Zero-sized operands should be permitted, iteration checks IterSize for 0
+ NPY_ITER_ZEROSIZE_OK
+ # Permits reductions (size-0 stride with dimension size > 1)
+ NPY_ITER_REDUCE_OK
+ # Enables sub-range iteration
+ NPY_ITER_RANGED
+ # Enables buffering
+ NPY_ITER_BUFFERED
+ # When buffering is enabled, grows the inner loop if possible
+ NPY_ITER_GROWINNER
+ # Delay allocation of buffers until first Reset* call
+ NPY_ITER_DELAY_BUFALLOC
+ # When NPY_KEEPORDER is specified, disable reversing negative-stride axes
+ NPY_ITER_DONT_NEGATE_STRIDES
+ NPY_ITER_COPY_IF_OVERLAP
+ # The operand will be read from and written to
+ NPY_ITER_READWRITE
+ # The operand will only be read from
+ NPY_ITER_READONLY
+ # The operand will only be written to
+ NPY_ITER_WRITEONLY
+ # The operand's data must be in native byte order
+ NPY_ITER_NBO
+ # The operand's data must be aligned
+ NPY_ITER_ALIGNED
+ # The operand's data must be contiguous (within the inner loop)
+ NPY_ITER_CONTIG
+ # The operand may be copied to satisfy requirements
+ NPY_ITER_COPY
+ # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements
+ NPY_ITER_UPDATEIFCOPY
+ # Allocate the operand if it is NULL
+ NPY_ITER_ALLOCATE
+ # If an operand is allocated, don't use any subtype
+ NPY_ITER_NO_SUBTYPE
+ # This is a virtual array slot, operand is NULL but temporary data is there
+ NPY_ITER_VIRTUAL
+ # Require that the dimension match the iterator dimensions exactly
+ NPY_ITER_NO_BROADCAST
+ # A mask is being used on this array, affects buffer -> array copy
+ NPY_ITER_WRITEMASKED
+ # This array is the mask for all WRITEMASKED operands
+ NPY_ITER_ARRAYMASK
+ # Assume iterator order data access for COPY_IF_OVERLAP
+ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
+
+ # construction and destruction functions
+ NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order,
+ NPY_CASTING casting, dtype datatype) except NULL
+ NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags,
+ NPY_ORDER order, NPY_CASTING casting, npy_uint32*
+ op_flags, PyArray_Descr** op_dtypes) except NULL
+ NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op,
+ npy_uint32 flags, NPY_ORDER order,
+ NPY_CASTING casting, npy_uint32* op_flags,
+ PyArray_Descr** op_dtypes, int oa_ndim,
+ int** op_axes, const npy_intp* itershape,
+ npy_intp buffersize) except NULL
+ NpyIter* NpyIter_Copy(NpyIter* it) except NULL
+ int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL
+ int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL
+ int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL
+ int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL
+ int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL
+ int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart,
+ npy_intp iend, char** errmsg) except NPY_FAIL
+ int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL
+ int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL
+ int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL
+ npy_intp NpyIter_GetIterSize(NpyIter* it) nogil
+ npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil
+ void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart,
+ npy_intp* iend) nogil
+ int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL
+ npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil
+ npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil
+ npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil
+ npy_bool NpyIter_HasIndex(NpyIter* it) nogil
+ npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil
+ npy_bool NpyIter_IsBuffered(NpyIter* it) nogil
+ npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil
+ npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil
+ int NpyIter_GetNDim(NpyIter* it) nogil
+ int NpyIter_GetNOp(NpyIter* it) nogil
+ npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL
+ int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil
+ PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it)
+ PyArrayObject** NpyIter_GetOperandArray(NpyIter* it)
+ ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i)
+ void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags)
+ void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags)
+ int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize,
+ npy_intp* outstrides) except NPY_FAIL
+ npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil
+ # functions for iterating an NpyIter object
+ #
+ # These don't match the definition in the C API because Cython can't wrap
+ # function pointers that return functions.
+ NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL
+ NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it,
+ char** errmsg) except NULL
+ char** NpyIter_GetDataPtrArray(NpyIter* it) nogil
+ char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil
+ npy_intp* NpyIter_GetIndexPtr(NpyIter* it)
+ npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil
+ npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil
+ void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil
+ npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil
+ void NpyIter_DebugPrint(NpyIter* it)
+
+# NpyString API
+cdef extern from "numpy/ndarraytypes.h":
+ ctypedef struct npy_string_allocator:
+ pass
+
+ ctypedef struct npy_packed_static_string:
+ pass
+
+ ctypedef struct npy_static_string:
+ size_t size
+ const char *buf
+
+ ctypedef struct PyArray_StringDTypeObject:
+ PyArray_Descr base
+ PyObject *na_object
+ char coerce
+ char has_nan_na
+ char has_string_na
+ char array_owned
+ npy_static_string default_string
+ npy_static_string na_name
+ npy_string_allocator *allocator
+
+cdef extern from "numpy/arrayobject.h":
+ npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr)
+ void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[])
+ void NpyString_release_allocator(npy_string_allocator *allocator)
+ void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[])
+ int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string)
+ int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string)
+ int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size)
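+
+# Usage sketch (illustrative): reading one packed string from a StringDType
+# array follows an acquire/load/release discipline:
+#
+#     allocator = NpyString_acquire_allocator(descr)
+#     if NpyString_load(allocator, packed, &unpacked) == 0:
+#         ...  # 0 means a regular string; buf/size valid until release
+#     NpyString_release_allocator(allocator)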
diff --git a/.venv/lib/python3.12/site-packages/numpy/__init__.py b/.venv/lib/python3.12/site-packages/numpy/__init__.py
new file mode 100644
index 00000000..8fb2e742
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/__init__.py
@@ -0,0 +1,928 @@
+"""
+NumPy
+=====
+
+Provides
+ 1. An array object of arbitrary homogeneous items
+ 2. Fast mathematical operations over arrays
+ 3. Linear Algebra, Fourier Transforms, Random Number Generation
+
+How to use the documentation
+----------------------------
+Documentation is available in two forms: docstrings provided
+with the code, and a free-standing reference guide, available from
+`the NumPy homepage <https://numpy.org>`_.
+
+We recommend exploring the docstrings using
+`IPython <https://ipython.org>`_, an advanced Python shell with
+TAB-completion and introspection capabilities. See below for further
+instructions.
+
+The docstring examples assume that `numpy` has been imported as ``np``::
+
+ >>> import numpy as np
+
+Code snippets are indicated by three greater-than signs::
+
+ >>> x = 42
+ >>> x = x + 1
+
+Use the built-in ``help`` function to view a function's docstring::
+
+ >>> help(np.sort)
+ ... # doctest: +SKIP
+
+For some objects, ``np.info(obj)`` may provide additional help. This is
+particularly true if you see the line "Help on ufunc object:" at the top
+of the help() page. Ufuncs are implemented in C, not Python, for speed.
+The native Python help() does not know how to view their help, but our
+np.info() function does.
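+
+For example::
+
+ >>> np.info(np.add)
+ ... # doctest: +SKIP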
+
+Available subpackages
+---------------------
+lib
+ Basic functions used by several sub-packages.
+random
+ Core Random Tools
+linalg
+ Core Linear Algebra Tools
+fft
+ Core FFT routines
+polynomial
+ Polynomial tools
+testing
+ NumPy testing tools
+distutils
+ Enhancements to distutils, with support for
+ Fortran compilers and more (for Python <= 3.11)
+
+Utilities
+---------
+test
+ Run numpy unittests
+show_config
+ Show numpy build configuration
+__version__
+ NumPy version string
+
+Viewing documentation using IPython
+-----------------------------------
+
+Start IPython and import `numpy`, usually under the alias ``np``: `import
+numpy as np`. Then, directly paste or use the ``%cpaste`` magic to paste
+examples into the shell. To see which functions are available in `numpy`,
+type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
+``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
+down the list. To view the docstring for a function, use
+``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
+the source code).
+
+Copies vs. in-place operation
+-----------------------------
+Most of the functions in `numpy` return a copy of the array argument
+(e.g., `np.sort`). In-place versions of these functions are often
+available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
+Exceptions to this rule are documented.
+
+"""
+import os
+import sys
+import warnings
+
+# If a version with git hash was stored, use that instead
+from . import version
+from ._expired_attrs_2_0 import __expired_attributes__
+from ._globals import _CopyMode, _NoValue
+from .version import __version__
+
+# We first need to detect if we're being called as part of the numpy setup
+# procedure itself in a reliable manner.
+try:
+ __NUMPY_SETUP__ # noqa: B018
+except NameError:
+ __NUMPY_SETUP__ = False
+
+if __NUMPY_SETUP__:
+ sys.stderr.write('Running from numpy source directory.\n')
+else:
+ # Allow distributors to run custom init code before importing numpy._core
+ from . import _distributor_init
+
+ try:
+ from numpy.__config__ import show_config
+ except ImportError as e:
+ msg = """Error importing numpy: you should not try to import numpy from
+ its source directory; please exit the numpy source tree, and relaunch
+ your python interpreter from there."""
+ raise ImportError(msg) from e
+
+ from . import _core
+ from ._core import (
+ False_,
+ ScalarType,
+ True_,
+ abs,
+ absolute,
+ acos,
+ acosh,
+ add,
+ all,
+ allclose,
+ amax,
+ amin,
+ any,
+ arange,
+ arccos,
+ arccosh,
+ arcsin,
+ arcsinh,
+ arctan,
+ arctan2,
+ arctanh,
+ argmax,
+ argmin,
+ argpartition,
+ argsort,
+ argwhere,
+ around,
+ array,
+ array2string,
+ array_equal,
+ array_equiv,
+ array_repr,
+ array_str,
+ asanyarray,
+ asarray,
+ ascontiguousarray,
+ asfortranarray,
+ asin,
+ asinh,
+ astype,
+ atan,
+ atan2,
+ atanh,
+ atleast_1d,
+ atleast_2d,
+ atleast_3d,
+ base_repr,
+ binary_repr,
+ bitwise_and,
+ bitwise_count,
+ bitwise_invert,
+ bitwise_left_shift,
+ bitwise_not,
+ bitwise_or,
+ bitwise_right_shift,
+ bitwise_xor,
+ block,
+ bool,
+ bool_,
+ broadcast,
+ busday_count,
+ busday_offset,
+ busdaycalendar,
+ byte,
+ bytes_,
+ can_cast,
+ cbrt,
+ cdouble,
+ ceil,
+ character,
+ choose,
+ clip,
+ clongdouble,
+ complex64,
+ complex128,
+ complexfloating,
+ compress,
+ concat,
+ concatenate,
+ conj,
+ conjugate,
+ convolve,
+ copysign,
+ copyto,
+ correlate,
+ cos,
+ cosh,
+ count_nonzero,
+ cross,
+ csingle,
+ cumprod,
+ cumsum,
+ cumulative_prod,
+ cumulative_sum,
+ datetime64,
+ datetime_as_string,
+ datetime_data,
+ deg2rad,
+ degrees,
+ diagonal,
+ divide,
+ divmod,
+ dot,
+ double,
+ dtype,
+ e,
+ einsum,
+ einsum_path,
+ empty,
+ empty_like,
+ equal,
+ errstate,
+ euler_gamma,
+ exp,
+ exp2,
+ expm1,
+ fabs,
+ finfo,
+ flatiter,
+ flatnonzero,
+ flexible,
+ float16,
+ float32,
+ float64,
+ float_power,
+ floating,
+ floor,
+ floor_divide,
+ fmax,
+ fmin,
+ fmod,
+ format_float_positional,
+ format_float_scientific,
+ frexp,
+ from_dlpack,
+ frombuffer,
+ fromfile,
+ fromfunction,
+ fromiter,
+ frompyfunc,
+ fromstring,
+ full,
+ full_like,
+ gcd,
+ generic,
+ geomspace,
+ get_printoptions,
+ getbufsize,
+ geterr,
+ geterrcall,
+ greater,
+ greater_equal,
+ half,
+ heaviside,
+ hstack,
+ hypot,
+ identity,
+ iinfo,
+ indices,
+ inexact,
+ inf,
+ inner,
+ int8,
+ int16,
+ int32,
+ int64,
+ int_,
+ intc,
+ integer,
+ intp,
+ invert,
+ is_busday,
+ isclose,
+ isdtype,
+ isfinite,
+ isfortran,
+ isinf,
+ isnan,
+ isnat,
+ isscalar,
+ issubdtype,
+ lcm,
+ ldexp,
+ left_shift,
+ less,
+ less_equal,
+ lexsort,
+ linspace,
+ little_endian,
+ log,
+ log1p,
+ log2,
+ log10,
+ logaddexp,
+ logaddexp2,
+ logical_and,
+ logical_not,
+ logical_or,
+ logical_xor,
+ logspace,
+ long,
+ longdouble,
+ longlong,
+ matmul,
+ matrix_transpose,
+ matvec,
+ max,
+ maximum,
+ may_share_memory,
+ mean,
+ memmap,
+ min,
+ min_scalar_type,
+ minimum,
+ mod,
+ modf,
+ moveaxis,
+ multiply,
+ nan,
+ ndarray,
+ ndim,
+ nditer,
+ negative,
+ nested_iters,
+ newaxis,
+ nextafter,
+ nonzero,
+ not_equal,
+ number,
+ object_,
+ ones,
+ ones_like,
+ outer,
+ partition,
+ permute_dims,
+ pi,
+ positive,
+ pow,
+ power,
+ printoptions,
+ prod,
+ promote_types,
+ ptp,
+ put,
+ putmask,
+ rad2deg,
+ radians,
+ ravel,
+ recarray,
+ reciprocal,
+ record,
+ remainder,
+ repeat,
+ require,
+ reshape,
+ resize,
+ result_type,
+ right_shift,
+ rint,
+ roll,
+ rollaxis,
+ round,
+ sctypeDict,
+ searchsorted,
+ set_printoptions,
+ setbufsize,
+ seterr,
+ seterrcall,
+ shape,
+ shares_memory,
+ short,
+ sign,
+ signbit,
+ signedinteger,
+ sin,
+ single,
+ sinh,
+ size,
+ sort,
+ spacing,
+ sqrt,
+ square,
+ squeeze,
+ stack,
+ std,
+ str_,
+ subtract,
+ sum,
+ swapaxes,
+ take,
+ tan,
+ tanh,
+ tensordot,
+ timedelta64,
+ trace,
+ transpose,
+ true_divide,
+ trunc,
+ typecodes,
+ ubyte,
+ ufunc,
+ uint,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ uintc,
+ uintp,
+ ulong,
+ ulonglong,
+ unsignedinteger,
+ unstack,
+ ushort,
+ var,
+ vdot,
+ vecdot,
+ vecmat,
+ void,
+ vstack,
+ where,
+ zeros,
+ zeros_like,
+ )
+
+ # NOTE: It's still under discussion whether these aliases
+ # should be removed.
+ for ta in ["float96", "float128", "complex192", "complex256"]:
+ try:
+ globals()[ta] = getattr(_core, ta)
+ except AttributeError:
+ pass
+ del ta
+
+ from . import lib
+ from . import matrixlib as _mat
+ from .lib import scimath as emath
+ from .lib._arraypad_impl import pad
+ from .lib._arraysetops_impl import (
+ ediff1d,
+ in1d,
+ intersect1d,
+ isin,
+ setdiff1d,
+ setxor1d,
+ union1d,
+ unique,
+ unique_all,
+ unique_counts,
+ unique_inverse,
+ unique_values,
+ )
+ from .lib._function_base_impl import (
+ angle,
+ append,
+ asarray_chkfinite,
+ average,
+ bartlett,
+ bincount,
+ blackman,
+ copy,
+ corrcoef,
+ cov,
+ delete,
+ diff,
+ digitize,
+ extract,
+ flip,
+ gradient,
+ hamming,
+ hanning,
+ i0,
+ insert,
+ interp,
+ iterable,
+ kaiser,
+ median,
+ meshgrid,
+ percentile,
+ piecewise,
+ place,
+ quantile,
+ rot90,
+ select,
+ sinc,
+ sort_complex,
+ trapezoid,
+ trapz,
+ trim_zeros,
+ unwrap,
+ vectorize,
+ )
+ from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd
+ from .lib._index_tricks_impl import (
+ c_,
+ diag_indices,
+ diag_indices_from,
+ fill_diagonal,
+ index_exp,
+ ix_,
+ mgrid,
+ ndenumerate,
+ ndindex,
+ ogrid,
+ r_,
+ ravel_multi_index,
+ s_,
+ unravel_index,
+ )
+ from .lib._nanfunctions_impl import (
+ nanargmax,
+ nanargmin,
+ nancumprod,
+ nancumsum,
+ nanmax,
+ nanmean,
+ nanmedian,
+ nanmin,
+ nanpercentile,
+ nanprod,
+ nanquantile,
+ nanstd,
+ nansum,
+ nanvar,
+ )
+ from .lib._npyio_impl import (
+ fromregex,
+ genfromtxt,
+ load,
+ loadtxt,
+ packbits,
+ save,
+ savetxt,
+ savez,
+ savez_compressed,
+ unpackbits,
+ )
+ from .lib._polynomial_impl import (
+ poly,
+ poly1d,
+ polyadd,
+ polyder,
+ polydiv,
+ polyfit,
+ polyint,
+ polymul,
+ polysub,
+ polyval,
+ roots,
+ )
+ from .lib._shape_base_impl import (
+ apply_along_axis,
+ apply_over_axes,
+ array_split,
+ column_stack,
+ dsplit,
+ dstack,
+ expand_dims,
+ hsplit,
+ kron,
+ put_along_axis,
+ row_stack,
+ split,
+ take_along_axis,
+ tile,
+ vsplit,
+ )
+ from .lib._stride_tricks_impl import (
+ broadcast_arrays,
+ broadcast_shapes,
+ broadcast_to,
+ )
+ from .lib._twodim_base_impl import (
+ diag,
+ diagflat,
+ eye,
+ fliplr,
+ flipud,
+ histogram2d,
+ mask_indices,
+ tri,
+ tril,
+ tril_indices,
+ tril_indices_from,
+ triu,
+ triu_indices,
+ triu_indices_from,
+ vander,
+ )
+ from .lib._type_check_impl import (
+ common_type,
+ imag,
+ iscomplex,
+ iscomplexobj,
+ isreal,
+ isrealobj,
+ mintypecode,
+ nan_to_num,
+ real,
+ real_if_close,
+ typename,
+ )
+ from .lib._ufunclike_impl import fix, isneginf, isposinf
+ from .lib._utils_impl import get_include, info, show_runtime
+ from .matrixlib import asmatrix, bmat, matrix
+
+ # Public submodules are imported lazily and are therefore accessible
+ # from `__getattr__`. Note that `distutils` (deprecated) and `array_api`
+ # (experimental label) are not added here, because `from numpy import *`
+ # must not raise any warnings - that's too disruptive.
+ __numpy_submodules__ = {
+ "linalg", "fft", "dtypes", "random", "polynomial", "ma",
+ "exceptions", "lib", "ctypeslib", "testing", "typing",
+ "f2py", "test", "rec", "char", "core", "strings",
+ }
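+
+ # For example, `import numpy` does not import `numpy.fft` itself; the
+ # first access to `np.fft` falls through to `__getattr__` below, which
+ # imports the submodule and returns it.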
+
+ # We build warning messages for former attributes
+ _msg = (
+ "module 'numpy' has no attribute '{n}'.\n"
+ "`np.{n}` was a deprecated alias for the builtin `{n}`. "
+ "To avoid this error in existing code, use `{n}` by itself. "
+ "Doing this will not modify any behavior and is safe. {extended_msg}\n"
+ "The aliases were originally deprecated in NumPy 1.20; for more "
+ "details and guidance see the original release note at:\n"
+ " https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
+
+ _specific_msg = (
+ "If you specifically wanted the numpy scalar type, use `np.{}` here.")
+
+ _int_extended_msg = (
+ "When replacing `np.{}`, you may wish to use e.g. `np.int64` "
+ "or `np.int32` to specify the precision. If you wish to review "
+ "your current use, check the release note link for "
+ "additional information.")
+
+ _type_info = [
+ ("object", ""), # The NumPy scalar only exists by name.
+ ("float", _specific_msg.format("float64")),
+ ("complex", _specific_msg.format("complex128")),
+ ("str", _specific_msg.format("str_")),
+ ("int", _int_extended_msg.format("int"))]
+
+ __former_attrs__ = {
+ n: _msg.format(n=n, extended_msg=extended_msg)
+ for n, extended_msg in _type_info
+ }
+
+ # Some of these could be defined right away, but most were aliases to
+ # the Python objects and only removed in NumPy 1.24. Defining them should
+ # probably wait for NumPy 1.26 or 2.0.
+ # When defined, these should possibly not be added to `__all__` to avoid
+ # import with `from numpy import *`.
+ __future_scalars__ = {"str", "bytes", "object"}
+
+ __array_api_version__ = "2024.12"
+
+ from ._array_api_info import __array_namespace_info__
+
+ # now that the numpy core module is imported, we can initialize limits
+ _core.getlimits._register_known_types()
+
+ __all__ = list(
+ __numpy_submodules__ |
+ set(_core.__all__) |
+ set(_mat.__all__) |
+ set(lib._histograms_impl.__all__) |
+ set(lib._nanfunctions_impl.__all__) |
+ set(lib._function_base_impl.__all__) |
+ set(lib._twodim_base_impl.__all__) |
+ set(lib._shape_base_impl.__all__) |
+ set(lib._type_check_impl.__all__) |
+ set(lib._arraysetops_impl.__all__) |
+ set(lib._ufunclike_impl.__all__) |
+ set(lib._arraypad_impl.__all__) |
+ set(lib._utils_impl.__all__) |
+ set(lib._stride_tricks_impl.__all__) |
+ set(lib._polynomial_impl.__all__) |
+ set(lib._npyio_impl.__all__) |
+ set(lib._index_tricks_impl.__all__) |
+ {"emath", "show_config", "__version__", "__array_namespace_info__"}
+ )
+
+ # Filter out harmless Cython warnings
+ warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+ warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+ warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
+
+ def __getattr__(attr):
+ # Warn for expired attributes
+ import warnings
+
+ if attr == "linalg":
+ import numpy.linalg as linalg
+ return linalg
+ elif attr == "fft":
+ import numpy.fft as fft
+ return fft
+ elif attr == "dtypes":
+ import numpy.dtypes as dtypes
+ return dtypes
+ elif attr == "random":
+ import numpy.random as random
+ return random
+ elif attr == "polynomial":
+ import numpy.polynomial as polynomial
+ return polynomial
+ elif attr == "ma":
+ import numpy.ma as ma
+ return ma
+ elif attr == "ctypeslib":
+ import numpy.ctypeslib as ctypeslib
+ return ctypeslib
+ elif attr == "exceptions":
+ import numpy.exceptions as exceptions
+ return exceptions
+ elif attr == "testing":
+ import numpy.testing as testing
+ return testing
+ elif attr == "matlib":
+ import numpy.matlib as matlib
+ return matlib
+ elif attr == "f2py":
+ import numpy.f2py as f2py
+ return f2py
+ elif attr == "typing":
+ import numpy.typing as typing
+ return typing
+ elif attr == "rec":
+ import numpy.rec as rec
+ return rec
+ elif attr == "char":
+ import numpy.char as char
+ return char
+ elif attr == "array_api":
+ raise AttributeError("`numpy.array_api` is not available from "
+ "numpy 2.0 onwards", name=None)
+ elif attr == "core":
+ import numpy.core as core
+ return core
+ elif attr == "strings":
+ import numpy.strings as strings
+ return strings
+ elif attr == "distutils":
+ if 'distutils' in __numpy_submodules__:
+ import numpy.distutils as distutils
+ return distutils
+ else:
+ raise AttributeError("`numpy.distutils` is not available from "
+ "Python 3.12 onwards", name=None)
+
+ if attr in __future_scalars__:
+ # And future warnings for those that will change, but also give
+ # the AttributeError
+ warnings.warn(
+ f"In the future `np.{attr}` will be defined as the "
+ "corresponding NumPy scalar.", FutureWarning, stacklevel=2)
+
+ if attr in __former_attrs__:
+ raise AttributeError(__former_attrs__[attr], name=None)
+
+ if attr in __expired_attributes__:
+ raise AttributeError(
+ f"`np.{attr}` was removed in the NumPy 2.0 release. "
+ f"{__expired_attributes__[attr]}",
+ name=None
+ )
+
+ if attr == "chararray":
+ warnings.warn(
+ "`np.chararray` is deprecated and will be removed from "
+ "the main namespace in the future. Use an array with a string "
+ "or bytes dtype instead.", DeprecationWarning, stacklevel=2)
+ import numpy.char as char
+ return char.chararray
+
+ raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
+
+ def __dir__():
+ public_symbols = (
+ globals().keys() | __numpy_submodules__
+ )
+ public_symbols -= {
+ "matrixlib", "matlib", "tests", "conftest", "version",
+ "distutils", "array_api"
+ }
+ return list(public_symbols)
+
+ # Pytest testing
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+
+ def _sanity_check():
+ """
+ Quick sanity checks for common bugs caused by the environment.
+ There are some cases, e.g. with a wrong BLAS ABI, that cause wrong
+ results under specific runtime conditions that are not necessarily
+ reached during test suite runs, and it is useful to catch those early.
+
+ See https://github.com/numpy/numpy/issues/8577 and other
+ similar bug reports.
+
+ """
+ try:
+ x = ones(2, dtype=float32)
+ if not abs(x.dot(x) - float32(2.0)) < 1e-5:
+ raise AssertionError
+ except AssertionError:
+ msg = ("The current NumPy installation ({!r}) fails to "
+ "pass simple sanity checks. This can be caused, for example, "
+ "by an incorrect BLAS library being linked in, or by mixing "
+ "package managers (pip, conda, apt, ...). Search closed "
+ "numpy issues for similar problems.")
+ raise RuntimeError(msg.format(__file__)) from None
+
+ _sanity_check()
+ del _sanity_check
+
+ def _mac_os_check():
+ """
+ Quick sanity check on macOS, looking for Accelerate build bugs.
+ Testing numpy polyfit, which calls init_dgelsd (LAPACK).
+ """
+ try:
+ c = array([3., 2., 1.])
+ x = linspace(0, 2, 5)
+ y = polyval(c, x)
+ _ = polyfit(x, y, 2, cov=True)
+ except ValueError:
+ pass
+
+ if sys.platform == "darwin":
+ from . import exceptions
+ with warnings.catch_warnings(record=True) as w:
+ _mac_os_check()
+ # Raise a runtime error if the test failed; check for the
+ # warning and report the error_message
+ if len(w) > 0:
+ for _wn in w:
+ if _wn.category is exceptions.RankWarning:
+ # Ignore other warnings, they may not be relevant (see gh-25433)
+ error_message = (
+ f"{_wn.category.__name__}: {_wn.message}"
+ )
+ msg = (
+ "Polyfit sanity test emitted a warning, most likely due "
+ "to using a buggy Accelerate backend."
+ "\nIf you compiled yourself, more information is available at:" # noqa: E501
+ "\nhttps://numpy.org/devdocs/building/index.html"
+ "\nOtherwise report this to the vendor "
+ f"that provided NumPy.\n\n{error_message}\n")
+ raise RuntimeError(msg)
+ del _wn
+ del w
+ del _mac_os_check
+
+ def hugepage_setup():
+ """
+ We usually use madvise hugepages support, but on some old kernels it
+ is slow and thus better avoided. Specifically, kernel version 4.6
+ had a bug fix which probably fixed this:
+ https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
+ """
+ use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
+ if sys.platform == "linux" and use_hugepage is None:
+ # If there is an issue with parsing the kernel version,
+ # set use_hugepage to 0. Using LooseVersion would handle
+ # the kernel version parsing better, but it is avoided since
+ # it would increase the import time.
+ # See: #16679 for related discussion.
+ try:
+ use_hugepage = 1
+ kernel_version = os.uname().release.split(".")[:2]
+ kernel_version = tuple(int(v) for v in kernel_version)
+ if kernel_version < (4, 6):
+ use_hugepage = 0
+ except ValueError:
+ use_hugepage = 0
+ elif use_hugepage is None:
+ # This is not Linux, so it should not matter, just enable anyway
+ use_hugepage = 1
+ else:
+ use_hugepage = int(use_hugepage)
+ return use_hugepage
+
+ # Note that this will currently only make a difference on Linux
+ _core.multiarray._set_madvise_hugepage(hugepage_setup())
+ del hugepage_setup
+
+ # Give a warning if NumPy is reloaded or imported on a sub-interpreter
+ # We do this from Python, since the C-module may not be reloaded and
+ # it is tidier to organize it here.
+ _core.multiarray._multiarray_umath._reload_guard()
+
+ # TODO: Remove the environment variable entirely now that it is "weak"
+ if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"):
+ warnings.warn(
+ "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 "
+ "transition and is ignored after NumPy 2.2.",
+ UserWarning, stacklevel=2)
+
+ # Tell PyInstaller where to find hook-numpy.py
+ def _pyinstaller_hooks_dir():
+ from pathlib import Path
+ return [str(Path(__file__).with_name("_pyinstaller").resolve())]
+
+
+# Remove symbols imported for internal use
+del os, sys, warnings
diff --git a/.venv/lib/python3.12/site-packages/numpy/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/__init__.pyi
new file mode 100644
index 00000000..25397572
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/__init__.pyi
@@ -0,0 +1,5547 @@
+# ruff: noqa: I001
+import builtins
+import sys
+import mmap
+import ctypes as ct
+import array as _array
+import datetime as dt
+from abc import abstractmethod
+from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias
+from decimal import Decimal
+from fractions import Fraction
+from uuid import UUID
+
+import numpy as np
+from numpy.__config__ import show as show_config
+from numpy._pytesttester import PytestTester
+from numpy._core._internal import _ctypes
+
+from numpy._typing import (
+ # Arrays
+ ArrayLike,
+ NDArray,
+ _SupportsArray,
+ _NestedSequence,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat64_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex128_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeNumber_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeBytes_co,
+ _ArrayLikeStr_co,
+ _ArrayLikeString_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeDT64_co,
+ # DTypes
+ DTypeLike,
+ _DTypeLike,
+ _DTypeLikeVoid,
+ _VoidDTypeLike,
+ # Shapes
+ _AnyShape,
+ _Shape,
+ _ShapeLike,
+ # Scalars
+ _CharLike_co,
+ _IntLike_co,
+ _FloatLike_co,
+ _TD64Like_co,
+ _NumberLike_co,
+ _ScalarLike_co,
+ # `number` precision
+ NBitBase,
+ # NOTE: Do not remove the extended precision bit-types even if seemingly unused;
+ # they're used by the mypy plugin
+ _128Bit,
+ _96Bit,
+ _64Bit,
+ _32Bit,
+ _16Bit,
+ _8Bit,
+ _NBitByte,
+ _NBitShort,
+ _NBitIntC,
+ _NBitIntP,
+ _NBitLong,
+ _NBitLongLong,
+ _NBitHalf,
+ _NBitSingle,
+ _NBitDouble,
+ _NBitLongDouble,
+ # Character codes
+ _BoolCodes,
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _Float16Codes,
+ _Float32Codes,
+ _Float64Codes,
+ _Complex64Codes,
+ _Complex128Codes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntPCodes,
+ _LongCodes,
+ _LongLongCodes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntPCodes,
+ _ULongCodes,
+ _ULongLongCodes,
+ _HalfCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+ _CSingleCodes,
+ _CDoubleCodes,
+ _CLongDoubleCodes,
+ _DT64Codes,
+ _TD64Codes,
+ _StrCodes,
+ _BytesCodes,
+ _VoidCodes,
+ _ObjectCodes,
+ _StringCodes,
+ _UnsignedIntegerCodes,
+ _SignedIntegerCodes,
+ _IntegerCodes,
+ _FloatingCodes,
+ _ComplexFloatingCodes,
+ _InexactCodes,
+ _NumberCodes,
+ _CharacterCodes,
+ _FlexibleCodes,
+ _GenericCodes,
+ # Ufuncs
+ _UFunc_Nin1_Nout1,
+ _UFunc_Nin2_Nout1,
+ _UFunc_Nin1_Nout2,
+ _UFunc_Nin2_Nout2,
+ _GUFunc_Nin2_Nout1,
+)
+
+from numpy._typing._callable import (
+ _IntTrueDiv,
+ _UnsignedIntOp,
+ _UnsignedIntBitOp,
+ _UnsignedIntMod,
+ _UnsignedIntDivMod,
+ _SignedIntOp,
+ _SignedIntBitOp,
+ _SignedIntMod,
+ _SignedIntDivMod,
+ _FloatOp,
+ _FloatMod,
+ _FloatDivMod,
+ _NumberOp,
+ _ComparisonOpLT,
+ _ComparisonOpLE,
+ _ComparisonOpGT,
+ _ComparisonOpGE,
+)
+
+# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform
+from numpy._typing._extended_precision import (
+ float96,
+ float128,
+ complex192,
+ complex256,
+)
+
+from numpy._array_api_info import __array_namespace_info__
+
+from collections.abc import (
+ Callable,
+ Iterable,
+ Iterator,
+ Mapping,
+ Sequence,
+)
+
+if sys.version_info >= (3, 12):
+ from collections.abc import Buffer as _SupportsBuffer
+else:
+ _SupportsBuffer: TypeAlias = (
+ bytes
+ | bytearray
+ | memoryview
+ | _array.array[Any]
+ | mmap.mmap
+ | NDArray[Any]
+ | generic
+ )
+
+from typing import (
+ Any,
+ ClassVar,
+ Final,
+ Generic,
+ Literal as L,
+ LiteralString,
+ Never,
+ NoReturn,
+ Protocol,
+ Self,
+ SupportsComplex,
+ SupportsFloat,
+ SupportsInt,
+ SupportsIndex,
+ TypeAlias,
+ TypedDict,
+ final,
+ overload,
+ type_check_only,
+)
+
+# NOTE: `typing_extensions` and `_typeshed` are always available in `.pyi` stubs, even
+# if not available at runtime. This is because the `typeshed` stubs for the standard
+# library include `typing_extensions` stubs:
+# https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi
+from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite
+from typing_extensions import CapsuleType, TypeVar
+
+from numpy import (
+ char,
+ core,
+ ctypeslib,
+ dtypes,
+ exceptions,
+ f2py,
+ fft,
+ lib,
+ linalg,
+ ma,
+ polynomial,
+ random,
+ rec,
+ strings,
+ testing,
+ typing,
+)
+
+# available through `__getattr__`, but not in `__all__` or `__dir__`
+from numpy import (
+ __config__ as __config__,
+ matlib as matlib,
+ matrixlib as matrixlib,
+ version as version,
+)
+if sys.version_info < (3, 12):
+ from numpy import distutils as distutils
+
+from numpy._core.records import (
+ record,
+ recarray,
+)
+
+from numpy._core.function_base import (
+ linspace,
+ logspace,
+ geomspace,
+)
+
+from numpy._core.fromnumeric import (
+ take,
+ reshape,
+ choose,
+ repeat,
+ put,
+ swapaxes,
+ transpose,
+ matrix_transpose,
+ partition,
+ argpartition,
+ sort,
+ argsort,
+ argmax,
+ argmin,
+ searchsorted,
+ resize,
+ squeeze,
+ diagonal,
+ trace,
+ ravel,
+ nonzero,
+ shape,
+ compress,
+ clip,
+ sum,
+ all,
+ any,
+ cumsum,
+ cumulative_sum,
+ ptp,
+ max,
+ min,
+ amax,
+ amin,
+ prod,
+ cumprod,
+ cumulative_prod,
+ ndim,
+ size,
+ around,
+ round,
+ mean,
+ std,
+ var,
+)
+
+from numpy._core._asarray import (
+ require,
+)
+
+from numpy._core._type_aliases import (
+ sctypeDict,
+)
+
+from numpy._core._ufunc_config import (
+ seterr,
+ geterr,
+ setbufsize,
+ getbufsize,
+ seterrcall,
+ geterrcall,
+ _ErrKind,
+ _ErrCall,
+)
+
+from numpy._core.arrayprint import (
+ set_printoptions,
+ get_printoptions,
+ array2string,
+ format_float_scientific,
+ format_float_positional,
+ array_repr,
+ array_str,
+ printoptions,
+)
+
+from numpy._core.einsumfunc import (
+ einsum,
+ einsum_path,
+)
+
+from numpy._core.multiarray import (
+ array,
+ empty_like,
+ empty,
+ zeros,
+ concatenate,
+ inner,
+ where,
+ lexsort,
+ can_cast,
+ min_scalar_type,
+ result_type,
+ dot,
+ vdot,
+ bincount,
+ copyto,
+ putmask,
+ packbits,
+ unpackbits,
+ shares_memory,
+ may_share_memory,
+ asarray,
+ asanyarray,
+ ascontiguousarray,
+ asfortranarray,
+ arange,
+ busday_count,
+ busday_offset,
+ datetime_as_string,
+ datetime_data,
+ frombuffer,
+ fromfile,
+ fromiter,
+ is_busday,
+ promote_types,
+ fromstring,
+ frompyfunc,
+ nested_iters,
+ flagsobj,
+)
+
+from numpy._core.numeric import (
+ zeros_like,
+ ones,
+ ones_like,
+ full,
+ full_like,
+ count_nonzero,
+ isfortran,
+ argwhere,
+ flatnonzero,
+ correlate,
+ convolve,
+ outer,
+ tensordot,
+ roll,
+ rollaxis,
+ moveaxis,
+ cross,
+ indices,
+ fromfunction,
+ isscalar,
+ binary_repr,
+ base_repr,
+ identity,
+ allclose,
+ isclose,
+ array_equal,
+ array_equiv,
+ astype,
+)
+
+from numpy._core.numerictypes import (
+ isdtype,
+ issubdtype,
+ ScalarType,
+ typecodes,
+)
+
+from numpy._core.shape_base import (
+ atleast_1d,
+ atleast_2d,
+ atleast_3d,
+ block,
+ hstack,
+ stack,
+ vstack,
+ unstack,
+)
+
+from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__
+
+from numpy.lib import (
+ scimath as emath,
+)
+
+from numpy.lib._arraypad_impl import (
+ pad,
+)
+
+from numpy.lib._arraysetops_impl import (
+ ediff1d,
+ in1d,
+ intersect1d,
+ isin,
+ setdiff1d,
+ setxor1d,
+ union1d,
+ unique,
+ unique_all,
+ unique_counts,
+ unique_inverse,
+ unique_values,
+)
+
+from numpy.lib._function_base_impl import (
+ select,
+ piecewise,
+ trim_zeros,
+ copy,
+ iterable,
+ percentile,
+ diff,
+ gradient,
+ angle,
+ unwrap,
+ sort_complex,
+ flip,
+ rot90,
+ extract,
+ place,
+ asarray_chkfinite,
+ average,
+ digitize,
+ cov,
+ corrcoef,
+ median,
+ sinc,
+ hamming,
+ hanning,
+ bartlett,
+ blackman,
+ kaiser,
+ trapezoid,
+ trapz,
+ i0,
+ meshgrid,
+ delete,
+ insert,
+ append,
+ interp,
+ quantile,
+)
+
+from numpy._globals import _CopyMode
+
+from numpy.lib._histograms_impl import (
+ histogram_bin_edges,
+ histogram,
+ histogramdd,
+)
+
+from numpy.lib._index_tricks_impl import (
+ ndenumerate,
+ ndindex,
+ ravel_multi_index,
+ unravel_index,
+ mgrid,
+ ogrid,
+ r_,
+ c_,
+ s_,
+ index_exp,
+ ix_,
+ fill_diagonal,
+ diag_indices,
+ diag_indices_from,
+)
+
+from numpy.lib._nanfunctions_impl import (
+ nansum,
+ nanmax,
+ nanmin,
+ nanargmax,
+ nanargmin,
+ nanmean,
+ nanmedian,
+ nanpercentile,
+ nanvar,
+ nanstd,
+ nanprod,
+ nancumsum,
+ nancumprod,
+ nanquantile,
+)
+
+from numpy.lib._npyio_impl import (
+ savetxt,
+ loadtxt,
+ genfromtxt,
+ load,
+ save,
+ savez,
+ savez_compressed,
+ fromregex,
+)
+
+from numpy.lib._polynomial_impl import (
+ poly,
+ roots,
+ polyint,
+ polyder,
+ polyadd,
+ polysub,
+ polymul,
+ polydiv,
+ polyval,
+ polyfit,
+)
+
+from numpy.lib._shape_base_impl import (
+ column_stack,
+ row_stack,
+ dstack,
+ array_split,
+ split,
+ hsplit,
+ vsplit,
+ dsplit,
+ apply_over_axes,
+ expand_dims,
+ apply_along_axis,
+ kron,
+ tile,
+ take_along_axis,
+ put_along_axis,
+)
+
+from numpy.lib._stride_tricks_impl import (
+ broadcast_to,
+ broadcast_arrays,
+ broadcast_shapes,
+)
+
+from numpy.lib._twodim_base_impl import (
+ diag,
+ diagflat,
+ eye,
+ fliplr,
+ flipud,
+ tri,
+ triu,
+ tril,
+ vander,
+ histogram2d,
+ mask_indices,
+ tril_indices,
+ tril_indices_from,
+ triu_indices,
+ triu_indices_from,
+)
+
+from numpy.lib._type_check_impl import (
+ mintypecode,
+ real,
+ imag,
+ iscomplex,
+ isreal,
+ iscomplexobj,
+ isrealobj,
+ nan_to_num,
+ real_if_close,
+ typename,
+ common_type,
+)
+
+from numpy.lib._ufunclike_impl import (
+ fix,
+ isposinf,
+ isneginf,
+)
+
+from numpy.lib._utils_impl import (
+ get_include,
+ info,
+ show_runtime,
+)
+
+from numpy.matrixlib import (
+ asmatrix,
+ bmat,
+)
+
+__all__ = [ # noqa: RUF022
+ # __numpy_submodules__
+ "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg",
+ "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing",
+
+ # _core.__all__
+ "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert",
+ "bitwise_left_shift", "bitwise_right_shift", "concat", "pow", "permute_dims",
+ "memmap", "sctypeDict", "record", "recarray",
+
+ # _core.numeric.__all__
+ "newaxis", "ndarray", "flatiter", "nditer", "nested_iters", "ufunc", "arange",
+ "array", "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "zeros",
+ "count_nonzero", "empty", "broadcast", "dtype", "fromstring", "fromfile",
+ "frombuffer", "from_dlpack", "where", "argwhere", "copyto", "concatenate",
+ "lexsort", "astype", "can_cast", "promote_types", "min_scalar_type", "result_type",
+ "isfortran", "empty_like", "zeros_like", "ones_like", "correlate", "convolve",
+ "inner", "dot", "outer", "vdot", "roll", "rollaxis", "moveaxis", "cross",
+ "tensordot", "little_endian", "fromiter", "array_equal", "array_equiv", "indices",
+ "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones",
+ "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_",
+ "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat",
+ "shares_memory", "may_share_memory",
+ "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort",
+ "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod",
+ "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim",
+ "nonzero", "partition", "prod", "ptp", "put", "ravel", "repeat", "reshape",
+ "resize", "round", "searchsorted", "shape", "size", "sort", "squeeze", "std", "sum",
+ "swapaxes", "take", "trace", "transpose", "var",
+ "absolute", "add", "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctan2",
+ "arctanh", "bitwise_and", "bitwise_or", "bitwise_xor", "cbrt", "ceil", "conj",
+ "conjugate", "copysign", "cos", "cosh", "bitwise_count", "deg2rad", "degrees",
+ "divide", "divmod", "e", "equal", "euler_gamma", "exp", "exp2", "expm1", "fabs",
+ "floor", "floor_divide", "float_power", "fmax", "fmin", "fmod", "frexp",
+ "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert",
+ "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less",
+ "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2",
+ "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum",
+ "mod", "modf", "multiply", "negative", "nextafter", "not_equal", "pi", "positive",
+ "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint",
+ "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan",
+ "tanh", "true_divide", "trunc", "ScalarType", "typecodes", "issubdtype",
+ "datetime_data", "datetime_as_string", "busday_offset", "busday_count", "is_busday",
+ "busdaycalendar", "isdtype",
+ "complexfloating", "character", "unsignedinteger", "inexact", "generic", "floating",
+ "integer", "signedinteger", "number", "flexible", "bool", "float16", "float32",
+ "float64", "longdouble", "complex64", "complex128", "clongdouble",
+ "bytes_", "str_", "void", "object_", "datetime64", "timedelta64", "int8", "byte",
+ "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32",
+ "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp",
+ "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint",
+ "float96", "float128", "complex192", "complex256",
+ "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions",
+ "printoptions", "format_float_positional", "format_float_scientific", "require",
+ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
+ "errstate",
+ # _core.function_base.__all__
+ "logspace", "linspace", "geomspace",
+ # _core.getlimits.__all__
+ "finfo", "iinfo",
+ # _core.shape_base.__all__
+ "atleast_1d", "atleast_2d", "atleast_3d", "block", "hstack", "stack", "unstack",
+ "vstack",
+ # _core.einsumfunc.__all__
+ "einsum", "einsum_path",
+ # matrixlib.__all__
+ "matrix", "bmat", "asmatrix",
+ # lib._histograms_impl.__all__
+ "histogram", "histogramdd", "histogram_bin_edges",
+ # lib._nanfunctions_impl.__all__
+ "nansum", "nanmax", "nanmin", "nanargmax", "nanargmin", "nanmean", "nanmedian",
+ "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod",
+ "nanquantile",
+ # lib._function_base_impl.__all__
+ "select", "piecewise", "trim_zeros", "copy", "iterable", "percentile", "diff",
+ "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place",
+ "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov",
+ "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman",
+ "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append",
+ "interp", "quantile",
+ # lib._twodim_base_impl.__all__
+ "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander",
+ "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices",
+ "triu_indices_from",
+ # lib._shape_base_impl.__all__
+ "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit",
+ "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile",
+ "take_along_axis", "put_along_axis", "row_stack",
+ # lib._type_check_impl.__all__
+ "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real",
+ "real_if_close", "typename", "mintypecode", "common_type",
+ # lib._arraysetops_impl.__all__
+ "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d",
+ "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values",
+ # lib._ufunclike_impl.__all__
+ "fix", "isneginf", "isposinf",
+ # lib._arraypad_impl.__all__
+ "pad",
+ # lib._utils_impl.__all__
+ "get_include", "info", "show_runtime",
+ # lib._stride_tricks_impl.__all__
+ "broadcast_to", "broadcast_arrays", "broadcast_shapes",
+ # lib._polynomial_impl.__all__
+ "poly", "roots", "polyint", "polyder", "polyadd", "polysub", "polymul", "polydiv",
+ "polyval", "poly1d", "polyfit",
+ # lib._npyio_impl.__all__
+ "savetxt", "loadtxt", "genfromtxt", "load", "save", "savez", "savez_compressed",
+ "packbits", "unpackbits", "fromregex",
+ # lib._index_tricks_impl.__all__
+ "ravel_multi_index", "unravel_index", "mgrid", "ogrid", "r_", "c_", "s_",
+ "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices",
+ "diag_indices_from",
+
+ # __init__.__all__
+ "emath", "show_config", "__version__", "__array_namespace_info__",
+] # fmt: skip
+
+### Constrained types (for internal use only)
+# Only use these for functions; never as generic type parameter.
+
+_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes)
+_AnyShapeT = TypeVar(
+ "_AnyShapeT",
+ tuple[()], # 0-d
+ tuple[int], # 1-d
+ tuple[int, int], # 2-d
+ tuple[int, int, int], # 3-d
+ tuple[int, int, int, int], # 4-d
+ tuple[int, int, int, int, int], # 5-d
+ tuple[int, int, int, int, int, int], # 6-d
+ tuple[int, int, int, int, int, int, int], # 7-d
+ tuple[int, int, int, int, int, int, int, int], # 8-d
+ tuple[int, ...], # N-d
+)
+_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None)
+_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None)
+_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date | int | None)
+_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime)
+_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta)
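+
+# Illustrative note (not part of the upstream stubs): a constrained TypeVar
+# keeps argument and return types in lockstep, e.g. a hypothetical
+#     def _lower(s: _AnyStr) -> _AnyStr: ...
+# accepts `str` and returns `str`, or `bytes` and returns `bytes`.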
+
+### Type parameters (for internal use only)
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_RealT_co = TypeVar("_RealT_co", covariant=True)
+_ImagT_co = TypeVar("_ImagT_co", covariant=True)
+
+_CallableT = TypeVar("_CallableT", bound=Callable[..., object])
+
+_DTypeT = TypeVar("_DTypeT", bound=dtype)
+_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True)
+_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible])
+
+_ArrayT = TypeVar("_ArrayT", bound=ndarray)
+_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True)
+_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer | np.bool | object_])
+_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_])
+_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_])
+
+_ShapeT = TypeVar("_ShapeT", bound=_Shape)
+_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)
+_1DShapeT = TypeVar("_1DShapeT", bound=_1D)
+_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True)
+_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ...
+
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True)
+_NumberT = TypeVar("_NumberT", bound=number)
+_InexactT = TypeVar("_InexactT", bound=inexact)
+_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer)
+_FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True)
+_IntegerT = TypeVar("_IntegerT", bound=integer)
+_IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, covariant=True)
+_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64)
+
+_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated]
+_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated]
+_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated]
+
+_ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True)
+_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool)
+_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True)
+_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True)
+_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True)
+_FlexibleItemT_co = TypeVar(
+ "_FlexibleItemT_co",
+ bound=_CharLike_co | tuple[Any, ...],
+ default=_CharLike_co | tuple[Any, ...],
+ covariant=True,
+)
+_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True)
+_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True)
+_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True)
+_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit)
+_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool])
+
+### Type Aliases (for internal use only)
+
+_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]]
+_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]]
+
+_1D: TypeAlias = tuple[int]
+_2D: TypeAlias = tuple[int, int]
+_2Tuple: TypeAlias = tuple[_T, _T]
+
+_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool]
+_ArrayInt_co: TypeAlias = NDArray[integer | np.bool]
+_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool]
+_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool]
+_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool]
+_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool]
+_ArrayNumber_co: TypeAlias = NDArray[number | np.bool]
+_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool]
+
+_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool
+_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool
+_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co
+
+_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None
+_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...]
+
+_UnsignedIntegerCType: TypeAlias = type[
+ ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64
+ | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong
+ | ct.c_size_t | ct.c_void_p
+] # fmt: skip
+_SignedIntegerCType: TypeAlias = type[
+ ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64
+ | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong
+ | ct.c_ssize_t
+] # fmt: skip
+_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble]
+_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType
+_NumberCType: TypeAlias = _IntegerCType
+_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]]
+
+# some commonly used builtin types that are known to result in a
+# `dtype[object_]` when their *type* is passed to the `dtype` constructor
+# NOTE: `builtins.object` should not be included here
+_BuiltinObjectLike: TypeAlias = (
+ slice | Decimal | Fraction | UUID
+ | dt.date | dt.time | dt.timedelta | dt.tzinfo
+ | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any]
+) # fmt: skip
+
+# Introduce an alias for `dtype` to avoid naming conflicts.
+_dtype: TypeAlias = dtype[_ScalarT]
+
+_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"]
+# the argument can be any string: matching is case-insensitive, and only the first character matters
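+# For example (an illustrative sketch, not part of the stub):
+#   >>> import numpy as np
+#   >>> np.dtype("i4").newbyteorder(">")    # dtype('>i4') on little-endian machines
+#   >>> np.dtype("i4").newbyteorder("B")    # same: only the leading "B" is read
+#   >>> np.dtype("i4").newbyteorder("big")  # same, case-insensitively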
+_ByteOrder: TypeAlias = L[
+ "S", # swap the current order (default)
+ "<", "L", "little", # little-endian
+ ">", "B", "big", # big endian
+ "=", "N", "native", # native order
+ "|", "I", # ignore
+] # fmt: skip
+_DTypeKind: TypeAlias = L[
+ "b", # boolean
+ "i", # signed integer
+ "u", # unsigned integer
+ "f", # floating-point
+ "c", # complex floating-point
+ "m", # timedelta64
+ "M", # datetime64
+ "O", # python object
+ "S", # byte-string (fixed-width)
+ "U", # unicode-string (fixed-width)
+ "V", # void
+ "T", # unicode-string (variable-width)
+]
+_DTypeChar: TypeAlias = L[
+ "?", # bool
+ "b", # byte
+ "B", # ubyte
+ "h", # short
+ "H", # ushort
+ "i", # intc
+ "I", # uintc
+ "l", # long
+ "L", # ulong
+ "q", # longlong
+ "Q", # ulonglong
+ "e", # half
+ "f", # single
+ "d", # double
+ "g", # longdouble
+ "F", # csingle
+ "D", # cdouble
+ "G", # clongdouble
+ "O", # object
+ "S", # bytes_ (S0)
+ "a", # bytes_ (deprecated)
+ "U", # str_
+ "V", # void
+ "M", # datetime64
+ "m", # timedelta64
+ "c", # bytes_ (S1)
+ "T", # StringDType
+]
+_DTypeNum: TypeAlias = L[
+ 0, # bool
+ 1, # byte
+ 2, # ubyte
+ 3, # short
+ 4, # ushort
+ 5, # intc
+ 6, # uintc
+ 7, # long
+ 8, # ulong
+ 9, # longlong
+ 10, # ulonglong
+ 23, # half
+ 11, # single
+ 12, # double
+ 13, # longdouble
+ 14, # csingle
+ 15, # cdouble
+ 16, # clongdouble
+ 17, # object
+ 18, # bytes_
+ 19, # str_
+ 20, # void
+ 21, # datetime64
+ 22, # timedelta64
+ 25, # no type
+ 256, # user-defined
+ 2056, # StringDType
+]
+_DTypeBuiltinKind: TypeAlias = L[0, 1, 2]
+
+_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"]
+
+_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"]
+
+_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None
+_OrderACF: TypeAlias = L["A", "C", "F"] | None
+_OrderCF: TypeAlias = L["C", "F"] | None
+
+_ModeKind: TypeAlias = L["raise", "wrap", "clip"]
+_PartitionKind: TypeAlias = L["introselect"]
+# in practice, only the first character is considered (case-insensitively), so
+# e.g. "QuantumSort3000" will be interpreted as quicksort.
+_SortKind: TypeAlias = L[
+ "Q", "quick", "quicksort",
+ "M", "merge", "mergesort",
+ "H", "heap", "heapsort",
+ "S", "stable", "stablesort",
+]
+_SortSide: TypeAlias = L["left", "right"]
+
+_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co
+_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co
+_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co
+_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None
+_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None
+
+_NDIterFlagsKind: TypeAlias = L[
+ "buffered",
+ "c_index",
+ "copy_if_overlap",
+ "common_dtype",
+ "delay_bufalloc",
+ "external_loop",
+ "f_index",
+ "grow_inner", "growinner",
+ "multi_index",
+ "ranged",
+ "refs_ok",
+ "reduce_ok",
+ "zerosize_ok",
+]
+_NDIterFlagsOp: TypeAlias = L[
+ "aligned",
+ "allocate",
+ "arraymask",
+ "copy",
+ "config",
+ "nbo",
+ "no_subtype",
+ "no_broadcast",
+ "overlap_assume_elementwise",
+ "readonly",
+ "readwrite",
+ "updateifcopy",
+ "virtual",
+ "writeonly",
+ "writemasked"
+]
+
+_MemMapModeKind: TypeAlias = L[
+ "readonly", "r",
+ "copyonwrite", "c",
+ "readwrite", "r+",
+ "write", "w+",
+]
+
+_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"]
+_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"]
+_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"]
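+# For example (an illustrative sketch): the special strings above are accepted
+# by the `datetime64` constructor at runtime:
+#   >>> import numpy as np
+#   >>> np.datetime64("today")  # today's date, day precision
+#   >>> np.datetime64("now")    # current time, second precision
+#   >>> np.datetime64("NaT")    # not-a-time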
+
+_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"]
+_DayUnit: TypeAlias = L["W", "D", b"W", b"D"]
+_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit]
+_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"]
+_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"]
+_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit]
+_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit]
+_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit]
+_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit]
+_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex]
+
+### TypedDict's (for internal use only)
+
+@type_check_only
+class _FormerAttrsDict(TypedDict):
+ object: LiteralString
+ float: LiteralString
+ complex: LiteralString
+ str: LiteralString
+ int: LiteralString
+
+### Protocols (for internal use only)
+
+@type_check_only
+class _SupportsFileMethods(SupportsFlush, Protocol):
+ # Protocol for representing file-like objects accepted by `ndarray.tofile` and `fromfile`
+ def fileno(self) -> SupportsIndex: ...
+ def tell(self) -> SupportsIndex: ...
+ def seek(self, offset: int, whence: int, /) -> object: ...
+
+@type_check_only
+class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ...
+
+@type_check_only
+class _SupportsItem(Protocol[_T_co]):
+ def item(self, /) -> _T_co: ...
+
+@type_check_only
+class _SupportsDLPack(Protocol[_T_contra]):
+ def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ...
+
+@type_check_only
+class _HasDType(Protocol[_T_co]):
+ @property
+ def dtype(self, /) -> _T_co: ...
+
+@type_check_only
+class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+ @property
+ def real(self, /) -> _RealT_co: ...
+ @property
+ def imag(self, /) -> _ImagT_co: ...
+
+@type_check_only
+class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+ @property
+ def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ...
+
+@type_check_only
+class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+ @property
+ def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ...
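+
+# For example (an illustrative sketch): for a complex array, `dtype.type` is
+# `complex128`, whose `real`/`imag` attributes are `float64`, so `ndarray.real`
+# is inferred as a `float64` array via the protocol above:
+#   >>> import numpy as np
+#   >>> np.array([1 + 2j]).real  # array([1.])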
+
+@type_check_only
+class _HasDateAttributes(Protocol):
+ # The `datetime64` constructor requires an object with the three attributes below,
+ # and thus supports datetime duck typing
+ @property
+ def day(self) -> int: ...
+ @property
+ def month(self) -> int: ...
+ @property
+ def year(self) -> int: ...
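+# For example (an illustrative sketch): any object exposing integer `year`,
+# `month`, and `day` attributes is accepted, `datetime.date` being the
+# canonical case:
+#   >>> import datetime as dt, numpy as np
+#   >>> np.datetime64(dt.date(2025, 1, 1))  # numpy.datetime64('2025-01-01')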
+
+### Mixins (for internal use only)
+
+@type_check_only
+class _RealMixin:
+ @property
+ def real(self) -> Self: ...
+ @property
+ def imag(self) -> Self: ...
+
+@type_check_only
+class _RoundMixin:
+ @overload
+ def __round__(self, /, ndigits: None = None) -> int: ...
+ @overload
+ def __round__(self, /, ndigits: SupportsIndex) -> Self: ...
+
+@type_check_only
+class _IntegralMixin(_RealMixin):
+ @property
+ def numerator(self) -> Self: ...
+ @property
+ def denominator(self) -> L[1]: ...
+
+ def is_integer(self, /) -> L[True]: ...
+
+### Public API
+
+__version__: Final[LiteralString] = ...
+
+e: Final[float] = ...
+euler_gamma: Final[float] = ...
+pi: Final[float] = ...
+inf: Final[float] = ...
+nan: Final[float] = ...
+little_endian: Final[builtins.bool] = ...
+False_: Final[np.bool[L[False]]] = ...
+True_: Final[np.bool[L[True]]] = ...
+newaxis: Final[None] = None
+
+# not in __all__
+__NUMPY_SETUP__: Final[L[False]] = False
+__numpy_submodules__: Final[set[LiteralString]] = ...
+__former_attrs__: Final[_FormerAttrsDict] = ...
+__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ...
+__array_api_version__: Final[L["2024.12"]] = "2024.12"
+test: Final[PytestTester] = ...
+
+@type_check_only
+class _DTypeMeta(type):
+ @property
+ def type(cls, /) -> type[generic] | None: ...
+ @property
+ def _abstract(cls, /) -> bool: ...
+ @property
+ def _is_numeric(cls, /) -> bool: ...
+ @property
+ def _parametric(cls, /) -> bool: ...
+ @property
+ def _legacy(cls, /) -> bool: ...
+
+@final
+class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
+ names: tuple[builtins.str, ...] | None
+ def __hash__(self) -> int: ...
+
+ # `None` results in the default dtype
+ @overload
+ def __new__(
+ cls,
+ dtype: type[float64] | None,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...
+ ) -> dtype[float64]: ...
+
+ # Overload for `dtype` instances, scalar types, and instances that have a
+ # `dtype: dtype[_ScalarT]` attribute
+ @overload
+ def __new__(
+ cls,
+ dtype: _DTypeLike[_ScalarT],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[_ScalarT]: ...
+
+ # Builtin types
+ #
+ # NOTE: Typecheckers act as if `bool <: int <: float <: complex <: object`,
+ # even though at runtime `int`, `float`, and `complex` aren't subtypes.
+ # This makes it impossible to express e.g. "a float that isn't an int",
+ # since type checkers treat `_: float` like `_: float | int`.
+ #
+ # For more details, see:
+ # - https://github.com/numpy/numpy/issues/27032#issuecomment-2278958251
+ # - https://typing.readthedocs.io/en/latest/spec/special-types.html#special-cases-for-float-and-complex
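+ # For example (an illustrative sketch): because type checkers treat `int`
+ # and `bool` as implicit subtypes of `float`, `np.dtype(float)` is typed as
+ # the union below, even though at runtime it is simply `dtype('float64')`:
+ #   >>> import numpy as np
+ #   >>> np.dtype(float)  # dtype('float64'); typed as dtype[float64 | int_ | np.bool]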
+ @overload
+ def __new__(
+ cls,
+ dtype: type[builtins.bool | np.bool],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[np.bool]: ...
+ # NOTE: `_: type[int]` also accepts `type[int | bool]`
+ @overload
+ def __new__(
+ cls,
+ dtype: type[int | int_ | np.bool],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[int_ | np.bool]: ...
+ # NOTE: `_: type[float]` also accepts `type[float | int | bool]`
+ # NOTE: `float64` inherits from `float` at runtime, but this isn't
+ # reflected in these stubs, so an explicit `float64` is required here.
+ @overload
+ def __new__(
+ cls,
+ dtype: type[float | float64 | int_ | np.bool] | None,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[float64 | int_ | np.bool]: ...
+ # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]`
+ @overload
+ def __new__(
+ cls,
+ dtype: type[complex | complex128 | float64 | int_ | np.bool],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[complex128 | float64 | int_ | np.bool]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: type[bytes], # also includes `type[bytes_]`
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[bytes_]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: type[str], # also includes `type[str_]`
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[str_]: ...
+ # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to
+ # be run with the (undocumented) `--disable-memoryview-promotion` flag.
+ # This will be the default in a future mypy release, see:
+ # https://github.com/python/mypy/issues/15313
+ # Pyright / Pylance requires setting `disableBytesTypePromotions=true`,
+ # which is the default in strict mode
+ @overload
+ def __new__(
+ cls,
+ dtype: type[memoryview | void],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[void]: ...
+ # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`,
+ # and is therefore not included here
+ @overload
+ def __new__(
+ cls,
+ dtype: type[_BuiltinObjectLike | object_],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[object_]: ...
+
+ # Unions of builtins.
+ @overload
+ def __new__(
+ cls,
+ dtype: type[bytes | str],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[character]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: type[bytes | str | memoryview],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[flexible]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[str, Any] = ...,
+ ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ...
+
+ # `unsignedinteger` string-based representations and ctypes
+ @overload
+ def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ...
+ @overload
+ def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ...
+ @overload
+ def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ...
+ @overload
+ def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ...
+ @overload
+ def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ...
+ @overload
+ def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ...
+ @overload
+ def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ...
+ # NOTE: We're assuming here that `uintptr_t == size_t`,
+ # an assumption that does not hold in rare cases (same for `ssize_t`)
+ @overload
+ def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ...
+ @overload
+ def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ...
+ @overload
+ def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ...
+
+ # `signedinteger` string-based representations and ctypes
+ @overload
+ def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ...
+ @overload
+ def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ...
+ @overload
+ def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ...
+ @overload
+ def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ...
+ @overload
+ def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ...
+ @overload
+ def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ...
+ @overload
+ def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ...
+ @overload
+ def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ...
+ @overload
+ def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ...
+ @overload
+ def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ...
+
+ # `floating` string-based representations and ctypes
+ @overload
+ def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ...
+ @overload
+ def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ...
+ @overload
+ def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ...
+ @overload
+ def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ...
+ @overload
+ def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ...
+ @overload
+ def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ...
+ @overload
+ def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ...
+
+ # `complexfloating` string-based representations
+ @overload
+ def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ...
+ @overload
+ def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ...
+ @overload
+ def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ...
+ @overload
+ def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ...
+ @overload
+ def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ...
+
+ # Miscellaneous string-based representations and ctypes
+ @overload
+ def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ...
+ @overload
+ def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ...
+ @overload
+ def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ...
+ @overload
+ def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ...
+ @overload
+ def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ...
+ @overload
+ def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ...
+ @overload
+ def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ...
+
+ # `StringDType` requires special treatment because it has no scalar type
+ @overload
+ def __new__(
+ cls,
+ dtype: dtypes.StringDType | _StringCodes,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...
+ ) -> dtypes.StringDType: ...
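+
+ # For example (an illustrative sketch): since `StringDType` has no scalar
+ # type, indexing such an array yields a builtin `str`:
+ #   >>> import numpy as np
+ #   >>> a = np.array(["spam", "ham"], dtype="T")  # "T" selects StringDType
+ #   >>> a[0]  # 'spam', a builtin str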
+
+ # Combined char-codes and ctypes, analogous to the scalar-type hierarchy
+ @overload
+ def __new__(
+ cls,
+ dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[unsignedinteger]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _SignedIntegerCodes | _SignedIntegerCType,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[signedinteger]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _IntegerCodes | _IntegerCType,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[integer]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _FloatingCodes | _FloatingCType,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[floating]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _ComplexFloatingCodes,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[complexfloating]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _InexactCodes | _FloatingCType,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[inexact]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _NumberCodes | _NumberCType,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[number]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _CharacterCodes | type[ct.c_char],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[character]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _FlexibleCodes | type[ct.c_char],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[flexible]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _GenericCodes | _GenericCType,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[generic]: ...
+
+ # Handle strings that can't be expressed as literals, e.g. "S1", "S2", ...
+ @overload
+ def __new__(
+ cls,
+ dtype: builtins.str,
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype: ...
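+
+ # For example (an illustrative sketch): sized codes such as "S8" or "U16"
+ # cannot be enumerated as literals, so they fall through to the overload
+ # above and produce a plain `dtype` at type-check time:
+ #   >>> import numpy as np
+ #   >>> np.dtype("S8")  # dtype('S8'), statically just `dtype`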
+
+ # Catch-all overload for object-likes
+ # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some
+ # (static) type `T` s.t. `object_ <: T <: builtins.object` (`<:` denotes
+ # the subtyping relation, the (gradual) typing analogue of `issubclass()`).
+ # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types
+ @overload
+ def __new__(
+ cls,
+ dtype: type[object],
+ align: builtins.bool = ...,
+ copy: builtins.bool = ...,
+ metadata: dict[builtins.str, Any] = ...,
+ ) -> dtype[object_ | Any]: ...
+
+ def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
+
+ @overload
+ def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ...
+ @overload
+ def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ...
+
+ # NOTE: In the future, multiplications by 1 will also yield `flexible` dtypes
+ @overload
+ def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ...
+ @overload
+ def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ...
+ @overload
+ def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ...
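+
+ # For example (an illustrative sketch): multiplying a flexible dtype scales
+ # its itemsize, matching the `_FlexDTypeT` overload above:
+ #   >>> import numpy as np
+ #   >>> np.dtype("S4") * 2  # dtype('S8')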
+
+ # NOTE: `__rmul__` seems to be broken when used in combination with
+ # literals as of mypy 0.902. For now, the return type is set to `dtype`
+ # for non-flexible dtypes.
+ @overload
+ def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ...
+ @overload
+ def __rmul__(self, value: SupportsIndex, /) -> dtype: ...
+
+ def __gt__(self, other: DTypeLike, /) -> builtins.bool: ...
+ def __ge__(self, other: DTypeLike, /) -> builtins.bool: ...
+ def __lt__(self, other: DTypeLike, /) -> builtins.bool: ...
+ def __le__(self, other: DTypeLike, /) -> builtins.bool: ...
+
+ # Explicitly defined `__eq__` and `__ne__` to get around mypy's
+ # `strict_equality` option, even though their signatures are
+ # identical to their `object`-based counterparts
+ def __eq__(self, other: Any, /) -> builtins.bool: ...
+ def __ne__(self, other: Any, /) -> builtins.bool: ...
+
+ @property
+ def alignment(self) -> int: ...
+ @property
+ def base(self) -> dtype: ...
+ @property
+ def byteorder(self) -> _ByteOrderChar: ...
+ @property
+ def char(self) -> _DTypeChar: ...
+ @property
+ def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ...
+ @property
+ def fields(self) -> MappingProxyType[LiteralString, tuple[dtype, int] | tuple[dtype, int, Any]] | None: ...
+ @property
+ def flags(self) -> int: ...
+ @property
+ def hasobject(self) -> builtins.bool: ...
+ @property
+ def isbuiltin(self) -> _DTypeBuiltinKind: ...
+ @property
+ def isnative(self) -> builtins.bool: ...
+ @property
+ def isalignedstruct(self) -> builtins.bool: ...
+ @property
+ def itemsize(self) -> int: ...
+ @property
+ def kind(self) -> _DTypeKind: ...
+ @property
+ def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ...
+ @property
+ def name(self) -> LiteralString: ...
+ @property
+ def num(self) -> _DTypeNum: ...
+ @property
+ def shape(self) -> _AnyShape: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def subdtype(self) -> tuple[dtype, _AnyShape] | None: ...
+ def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ...
+ @property
+ def str(self) -> LiteralString: ...
+ @property
+ def type(self) -> type[_ScalarT_co]: ...
+
+@final
+class flatiter(Generic[_ArrayT_co]):
+ __hash__: ClassVar[None]
+ @property
+ def base(self) -> _ArrayT_co: ...
+ @property
+ def coords(self) -> _Shape: ...
+ @property
+ def index(self) -> int: ...
+ def copy(self) -> _ArrayT_co: ...
+ def __iter__(self) -> Self: ...
+ def __next__(self: flatiter[NDArray[_ScalarT]]) -> _ScalarT: ...
+ def __len__(self) -> int: ...
+ @overload
+ def __getitem__(
+ self: flatiter[NDArray[_ScalarT]],
+ key: int | integer | tuple[int | integer],
+ ) -> _ScalarT: ...
+ @overload
+ def __getitem__(
+ self,
+ key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType],
+ ) -> _ArrayT_co: ...
+ # TODO: `__setitem__` operates via `unsafe` casting rules, and can
+ # thus accept any type accepted by the relevant underlying `np.generic`
+ # constructor.
+ # This means that `value` must in reality be a supertype of `npt.ArrayLike`.
+ def __setitem__(
+ self,
+ key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType],
+ value: Any,
+ ) -> None: ...
+ @overload
+ def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DTypeT]: ...
+ @overload
+ def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ...
+ @overload
+ def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = ..., /) -> ndarray[_AnyShape, _DTypeT]: ...
+ @overload
+ def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ...
+
+@type_check_only
+class _ArrayOrScalarCommon:
+ @property
+ def real(self, /) -> Any: ...
+ @property
+ def imag(self, /) -> Any: ...
+ @property
+ def T(self) -> Self: ...
+ @property
+ def mT(self) -> Self: ...
+ @property
+ def data(self) -> memoryview: ...
+ @property
+ def flags(self) -> flagsobj: ...
+ @property
+ def itemsize(self) -> int: ...
+ @property
+ def nbytes(self) -> int: ...
+ @property
+ def device(self) -> L["cpu"]: ...
+
+ def __bool__(self, /) -> builtins.bool: ...
+ def __int__(self, /) -> int: ...
+ def __float__(self, /) -> float: ...
+ def __copy__(self) -> Self: ...
+ def __deepcopy__(self, memo: dict[int, Any] | None, /) -> Self: ...
+
+ # TODO: How to deal with the non-commutative nature of `==` and `!=`?
+ # xref numpy/numpy#17368
+ def __eq__(self, other: Any, /) -> Any: ...
+ def __ne__(self, other: Any, /) -> Any: ...
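+
+ # For example (an illustrative sketch): unlike `object.__eq__`, array
+ # equality is element-wise, hence the `Any` return type above:
+ #   >>> import numpy as np
+ #   >>> np.array([1, 2]) == np.array([1, 3])  # array([ True, False])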
+
+ def copy(self, order: _OrderKACF = ...) -> Self: ...
+ def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ...
+ def dumps(self) -> bytes: ...
+ def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
+ def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, sep: str = ..., format: str = ...) -> None: ...
+ # generics and 0d arrays return builtin scalars
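+ # For example (an illustrative sketch):
+ #   >>> import numpy as np
+ #   >>> np.array(3.5).tolist()       # 3.5, a builtin float
+ #   >>> np.array([[1, 2]]).tolist()  # [[1, 2]], nested builtin lists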
+ def tolist(self) -> Any: ...
+ def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ...
+
+ @property
+ def __array_interface__(self) -> dict[str, Any]: ...
+ @property
+ def __array_priority__(self) -> float: ...
+ @property
+ def __array_struct__(self) -> CapsuleType: ... # builtins.PyCapsule
+ def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ...
+ def __setstate__(self, state: tuple[
+ SupportsIndex, # version
+ _ShapeLike, # Shape
+ _DTypeT_co, # DType
+ np.bool, # F-contiguous
+ bytes | list[Any], # Data
+ ], /) -> None: ...
+
+ def conj(self) -> Self: ...
+ def conjugate(self) -> Self: ...
+
+ def argsort(
+ self,
+ axis: SupportsIndex | None = ...,
+ kind: _SortKind | None = ...,
+ order: str | Sequence[str] | None = ...,
+ *,
+ stable: bool | None = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload # axis=None (default), out=None (default), keepdims=False (default)
+ def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ...
+ @overload # axis=index, out=None (default)
+ def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ...
+ @overload # axis=index, out=ndarray
+ def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+ @overload
+ def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+
+ @overload # axis=None (default), out=None (default), keepdims=False (default)
+ def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ...
+ @overload # axis=index, out=None (default)
+ def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ...
+ @overload # axis=index, out=ndarray
+ def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+ @overload
+ def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ...
+
+ @overload # out=None (default)
+ def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ...
+ @overload # out=ndarray
+ def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ...
+
+ @overload # out=None (default)
+ def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ...
+ @overload # out=ndarray
+ def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ...
+
+ # TODO: Annotate kwargs with an unpacked `TypedDict`
+ @overload # out: None (default)
+ def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, out: None = None, **kwargs: Any) -> NDArray[Any]: ...
+ @overload
+ def clip(self, /, min: None, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ...
+ @overload
+ def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ...
+ @overload # out: ndarray
+ def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
+ @overload
+ def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
+ @overload
+ def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
+ @overload
+ def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
+
+ @overload
+ def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ...
+ @overload
+ def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ...
+
+ @overload # out: None (default)
+ def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ...
+ @overload # out: ndarray
+ def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...
+
+ @overload # out: None (default)
+ def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ...
+ @overload # out: ndarray
+ def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...
+
+ @overload
+ def max(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ out: None = None,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = True,
+ ) -> Any: ...
+ @overload
+ def max(
+ self,
+ /,
+ axis: _ShapeLike | None,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+ @overload
+ def max(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+
+ @overload
+ def min(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ out: None = None,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = True,
+ ) -> Any: ...
+ @overload
+ def min(
+ self,
+ /,
+ axis: _ShapeLike | None,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+ @overload
+ def min(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+
+ @overload
+ def sum(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ out: None = None,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = 0,
+ where: _ArrayLikeBool_co = True,
+ ) -> Any: ...
+ @overload
+ def sum(
+ self,
+ /,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike | None,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = 0,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+ @overload
+ def sum(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = 0,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+
+ @overload
+ def prod(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ out: None = None,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = 1,
+ where: _ArrayLikeBool_co = True,
+ ) -> Any: ...
+ @overload
+ def prod(
+ self,
+ /,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike | None,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = 1,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+ @overload
+ def prod(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ initial: _NumberLike_co = 1,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+
+ @overload
+ def mean(
+ self,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ out: None = None,
+ keepdims: builtins.bool = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ ) -> Any: ...
+ @overload
+ def mean(
+ self,
+ /,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike | None,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+ @overload
+ def mean(
+ self,
+ /,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: builtins.bool = False,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+
+ @overload
+ def std(
+ self,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ out: None = None,
+ ddof: float = 0,
+ keepdims: builtins.bool = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ mean: _ArrayLikeNumber_co = ...,
+ correction: float = ...,
+ ) -> Any: ...
+ @overload
+ def std(
+ self,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike | None,
+ out: _ArrayT,
+ ddof: float = 0,
+ keepdims: builtins.bool = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ mean: _ArrayLikeNumber_co = ...,
+ correction: float = ...,
+ ) -> _ArrayT: ...
+ @overload
+ def std(
+ self,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ *,
+ out: _ArrayT,
+ ddof: float = 0,
+ keepdims: builtins.bool = False,
+ where: _ArrayLikeBool_co = True,
+ mean: _ArrayLikeNumber_co = ...,
+ correction: float = ...,
+ ) -> _ArrayT: ...
+
+ @overload
+ def var(
+ self,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ out: None = None,
+ ddof: float = 0,
+ keepdims: builtins.bool = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ mean: _ArrayLikeNumber_co = ...,
+ correction: float = ...,
+ ) -> Any: ...
+ @overload
+ def var(
+ self,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike | None,
+ out: _ArrayT,
+ ddof: float = 0,
+ keepdims: builtins.bool = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ mean: _ArrayLikeNumber_co = ...,
+ correction: float = ...,
+ ) -> _ArrayT: ...
+ @overload
+ def var(
+ self,
+ axis: _ShapeLike | None = None,
+ dtype: DTypeLike | None = None,
+ *,
+ out: _ArrayT,
+ ddof: float = 0,
+ keepdims: builtins.bool = False,
+ where: _ArrayLikeBool_co = True,
+ mean: _ArrayLikeNumber_co = ...,
+ correction: float = ...,
+ ) -> _ArrayT: ...
+
+class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]):
+ __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride]
+ @property
+ def base(self) -> NDArray[Any] | None: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def size(self) -> int: ...
+ @property
+ def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ...
+ @real.setter
+ def real(self, value: ArrayLike, /) -> None: ...
+ @property
+ def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ...
+ @imag.setter
+ def imag(self, value: ArrayLike, /) -> None: ...
+
+ def __new__(
+ cls,
+ shape: _ShapeLike,
+ dtype: DTypeLike = ...,
+ buffer: _SupportsBuffer | None = ...,
+ offset: SupportsIndex = ...,
+ strides: _ShapeLike | None = ...,
+ order: _OrderKACF = ...,
+ ) -> Self: ...
+
+ if sys.version_info >= (3, 12):
+ def __buffer__(self, flags: int, /) -> memoryview: ...
+
+ def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
+
+ @overload
+ def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ...
+
+ def __array_ufunc__(
+ self,
+ ufunc: ufunc,
+ method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"],
+ *inputs: Any,
+ **kwargs: Any,
+ ) -> Any: ...
+
+ def __array_function__(
+ self,
+ func: Callable[..., Any],
+ types: Iterable[type],
+ args: Iterable[Any],
+ kwargs: Mapping[str, Any],
+ ) -> Any: ...
+
+ # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__`
+ # is a pseudo-abstract method, the type has been narrowed down in order to
+ # grant subclasses a bit more flexibility
+ def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ...
+
+ def __array_wrap__(
+ self,
+ array: ndarray[_ShapeT, _DTypeT],
+ context: tuple[ufunc, tuple[Any, ...], int] | None = ...,
+ return_scalar: builtins.bool = ...,
+ /,
+ ) -> ndarray[_ShapeT, _DTypeT]: ...
+
+ @overload
+ def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ...
+ @overload
+ def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ...
+ @overload
+ def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ...
+
+ @overload # flexible | object_ | bool
+ def __setitem__(
+ self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType],
+ key: _ToIndices,
+ value: object,
+ /,
+ ) -> None: ...
+ @overload # integer
+ def __setitem__(
+ self: NDArray[integer],
+ key: _ToIndices,
+ value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co,
+ /,
+ ) -> None: ...
+ @overload # floating
+ def __setitem__(
+ self: NDArray[floating],
+ key: _ToIndices,
+ value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None,
+ /,
+ ) -> None: ...
+ @overload # complexfloating
+ def __setitem__(
+ self: NDArray[complexfloating],
+ key: _ToIndices,
+ value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None,
+ /,
+ ) -> None: ...
+ @overload # timedelta64
+ def __setitem__(
+ self: NDArray[timedelta64],
+ key: _ToIndices,
+ value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64],
+ /,
+ ) -> None: ...
+ @overload # datetime64
+ def __setitem__(
+ self: NDArray[datetime64],
+ key: _ToIndices,
+ value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64],
+ /,
+ ) -> None: ...
+ @overload # void
+ def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ...
+ @overload # catch-all
+ def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ...
+
+ @property
+ def ctypes(self) -> _ctypes[int]: ...
+ @property
+ def shape(self) -> _ShapeT_co: ...
+ @shape.setter
+ def shape(self, value: _ShapeLike) -> None: ...
+ @property
+ def strides(self) -> _Shape: ...
+ @strides.setter
+ def strides(self, value: _ShapeLike) -> None: ...
+ def byteswap(self, inplace: builtins.bool = ...) -> Self: ...
+ def fill(self, value: Any, /) -> None: ...
+ @property
+ def flat(self) -> flatiter[Self]: ...
+
+ @overload # use the same output type as that of the underlying `generic`
+ def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ...
+ @overload # special casing for `StringDType`, which has no scalar type
+ def item(
+ self: ndarray[Any, dtypes.StringDType],
+ arg0: SupportsIndex | tuple[SupportsIndex, ...] = ...,
+ /,
+ *args: SupportsIndex,
+ ) -> str: ...
+
+ @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape`
+ def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ...
+ @overload
+ def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ...
+ @overload
+ def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ...
+ @overload
+ def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ...
+ @overload
+ def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ...
+ @overload
+ def tolist(self, /) -> Any: ...
+
+ @overload
+ def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ...
+ @overload
+ def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ...
+
+ def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ...
+
+ def squeeze(
+ self,
+ axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ...,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+
+ def swapaxes(
+ self,
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+
+ @overload
+ def transpose(self, axes: _ShapeLike | None, /) -> Self: ...
+ @overload
+ def transpose(self, *axes: SupportsIndex) -> Self: ...
+
+ @overload
+ def all(
+ self,
+ axis: None = None,
+ out: None = None,
+ keepdims: L[False, 0] = False,
+ *,
+ where: _ArrayLikeBool_co = True
+ ) -> np.bool: ...
+ @overload
+ def all(
+ self,
+ axis: int | tuple[int, ...] | None = None,
+ out: None = None,
+ keepdims: SupportsIndex = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ ) -> np.bool | NDArray[np.bool]: ...
+ @overload
+ def all(
+ self,
+ axis: int | tuple[int, ...] | None,
+ out: _ArrayT,
+ keepdims: SupportsIndex = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+ @overload
+ def all(
+ self,
+ axis: int | tuple[int, ...] | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: SupportsIndex = False,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+
+ @overload
+ def any(
+ self,
+ axis: None = None,
+ out: None = None,
+ keepdims: L[False, 0] = False,
+ *,
+ where: _ArrayLikeBool_co = True
+ ) -> np.bool: ...
+ @overload
+ def any(
+ self,
+ axis: int | tuple[int, ...] | None = None,
+ out: None = None,
+ keepdims: SupportsIndex = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ ) -> np.bool | NDArray[np.bool]: ...
+ @overload
+ def any(
+ self,
+ axis: int | tuple[int, ...] | None,
+ out: _ArrayT,
+ keepdims: SupportsIndex = False,
+ *,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+ @overload
+ def any(
+ self,
+ axis: int | tuple[int, ...] | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: SupportsIndex = False,
+ where: _ArrayLikeBool_co = True,
+ ) -> _ArrayT: ...
+
+ #
+ @overload
+ def partition(
+ self,
+ /,
+ kth: _ArrayLikeInt,
+ axis: SupportsIndex = -1,
+ kind: _PartitionKind = "introselect",
+ order: None = None,
+ ) -> None: ...
+ @overload
+ def partition(
+ self: NDArray[void],
+ /,
+ kth: _ArrayLikeInt,
+ axis: SupportsIndex = -1,
+ kind: _PartitionKind = "introselect",
+ order: str | Sequence[str] | None = None,
+ ) -> None: ...
+
+ #
+ @overload
+ def argpartition(
+ self,
+ /,
+ kth: _ArrayLikeInt,
+ axis: SupportsIndex | None = -1,
+ kind: _PartitionKind = "introselect",
+ order: None = None,
+ ) -> NDArray[intp]: ...
+ @overload
+ def argpartition(
+ self: NDArray[void],
+ /,
+ kth: _ArrayLikeInt,
+ axis: SupportsIndex | None = -1,
+ kind: _PartitionKind = "introselect",
+ order: str | Sequence[str] | None = None,
+ ) -> NDArray[intp]: ...
+
+ #
+ def diagonal(
+ self,
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+
+ # 1D + 1D returns a scalar;
+ # all other combinations with at least one non-0D array return an ndarray.
+ @overload
+ def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ...
+ @overload
+ def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc]
+ @overload
+ def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ...
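+
+ # For example (an illustrative sketch):
+ #   >>> import numpy as np
+ #   >>> np.array([1, 2]).dot(np.array([3, 4]))  # 6, a scalar
+ #   >>> np.eye(2).dot(np.array([1.0, 2.0]))     # array([1., 2.])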
+
+ # `nonzero()` is deprecated for 0d arrays/generics
+ def nonzero(self) -> tuple[NDArray[intp], ...]: ...
+
+ # `put` is technically available to `generic`,
+ # but is pointless as `generic`s are immutable
+ def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ...
+
+ @overload
+ def searchsorted( # type: ignore[misc]
+ self, # >= 1D array
+ v: _ScalarLike_co, # 0D array-like
+ side: _SortSide = ...,
+ sorter: _ArrayLikeInt_co | None = ...,
+ ) -> intp: ...
+ @overload
+ def searchsorted(
+ self, # >= 1D array
+ v: ArrayLike,
+ side: _SortSide = ...,
+ sorter: _ArrayLikeInt_co | None = ...,
+ ) -> NDArray[intp]: ...
+
+ def sort(
+ self,
+ axis: SupportsIndex = ...,
+ kind: _SortKind | None = ...,
+ order: str | Sequence[str] | None = ...,
+ *,
+ stable: bool | None = ...,
+ ) -> None: ...
+
+ @overload
+ def trace(
+ self, # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ) -> Any: ...
+ @overload
+ def trace(
+ self, # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayT = ...,
+ ) -> _ArrayT: ...
+
+ @overload
+ def take( # type: ignore[misc]
+ self: NDArray[_ScalarT],
+ indices: _IntLike_co,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> _ScalarT: ...
+ @overload
+ def take( # type: ignore[misc]
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+ @overload
+ def take(
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+ out: _ArrayT = ...,
+ mode: _ModeKind = ...,
+ ) -> _ArrayT: ...
+
+ @overload
+ def repeat(
+ self,
+ repeats: _ArrayLikeInt_co,
+ axis: None = None,
+ ) -> ndarray[tuple[int], _DTypeT_co]: ...
+ @overload
+ def repeat(
+ self,
+ repeats: _ArrayLikeInt_co,
+ axis: SupportsIndex,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+
+ def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ...
+ def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ...
+
+ # NOTE: reshape also accepts negative integers, so we can't use integer literals
+ @overload # (None)
+ def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ...
+ @overload # (empty_sequence)
+ def reshape( # type: ignore[overload-overlap] # mypy false positive
+ self,
+ shape: Sequence[Never],
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[()], _DTypeT_co]: ...
+ @overload # (() | (int) | (int, int) | ....) # up to 8-d
+ def reshape(
+ self,
+ shape: _AnyShapeT,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[_AnyShapeT, _DTypeT_co]: ...
+ @overload # (index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[int], _DTypeT_co]: ...
+ @overload # (index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[int, int], _DTypeT_co]: ...
+ @overload # (index, index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ size3: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ...
+ @overload # (index, index, index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ size3: SupportsIndex,
+ size4: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ...
+ @overload # (int, *(index, ...))
+ def reshape(
+ self,
+ size0: SupportsIndex,
+ /,
+ *shape: SupportsIndex,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+ @overload # (sequence[index])
+ def reshape(
+ self,
+ shape: Sequence[SupportsIndex],
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
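+ # For example (runtime behavior): a negative size is resolved at runtime,
+ # which is why the overloads above cannot pin down literal result shapes:
+ # >>> np.arange(6).reshape(2, -1).shape
+ # (2, 3)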
+
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarT],
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: builtins.bool = ...,
+ copy: builtins.bool | _CopyMode = ...,
+ ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: builtins.bool = ...,
+ copy: builtins.bool | _CopyMode = ...,
+ ) -> ndarray[_ShapeT_co, dtype]: ...
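+ # For example (runtime behavior): `astype` preserves the shape and only
+ # changes the dtype, as encoded in the return types above:
+ # >>> np.arange(3).astype(np.float32)
+ # array([0., 1., 2.], dtype=float32)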
+
+ #
+ @overload # ()
+ def view(self, /) -> Self: ...
+ @overload # (dtype: T)
+ def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ...
+ @overload # (dtype: dtype[T])
+ def view(self, /, dtype: _DTypeLike[_ScalarT]) -> NDArray[_ScalarT]: ...
+ @overload # (type: T)
+ def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ...
+ @overload # (_: T)
+ def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ...
+ @overload # (dtype: ?)
+ def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ...
+ @overload # (dtype: ?, type: type[T])
+ def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ...
+
+ def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ...
+ @overload
+ def getfield(self, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ...
+ @overload
+ def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ...
+
+ def __index__(self: NDArray[integer], /) -> int: ...
+ def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ...
+
+ def __len__(self) -> int: ...
+ def __contains__(self, value: object, /) -> builtins.bool: ...
+
+ # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns
+ # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`.
+ # This way the bug only occurs for 9-D arrays, which are probably not very common.
+ @overload
+ def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ...
+ @overload # == 1-d & dtype[T \ object_]
+ def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ...
+ @overload # >= 2-d
+ def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ...
+ @overload # ?-d
+ def __iter__(self, /) -> Iterator[Any]: ...
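+ # For example (runtime behavior): iterating a 1-D array yields scalars,
+ # while iterating a >= 2-D array yields subarrays, as the overloads encode:
+ # >>> [type(x).__name__ for x in np.zeros(2)]
+ # ['float64', 'float64']
+ # >>> [type(x).__name__ for x in np.zeros((2, 2))]
+ # ['ndarray', 'ndarray']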
+
+ #
+ @overload
+ def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __lt__(
+ self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ...
+ @overload
+ def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ...
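+ # For example (runtime behavior): comparisons broadcast elementwise and
+ # return boolean arrays, hence the `NDArray[np.bool]` returns above:
+ # >>> np.arange(3) < 2
+ # array([ True,  True, False])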
+
+ #
+ @overload
+ def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __le__(
+ self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ...
+ @overload
+ def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ...
+
+ #
+ @overload
+ def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __gt__(
+ self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ...
+ @overload
+ def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ...
+
+ #
+ @overload
+ def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ...
+ @overload
+ def __ge__(
+ self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ...
+ @overload
+ def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ...
+
+ # Unary ops
+
+ # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed
+ # @overload
+ # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ...
+ # @overload
+ # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ...
+ # @overload
+ # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ...
+ # @overload
+ # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ...
+ @overload
+ def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ...
+ @overload
+ def __abs__(self: _RealArrayT, /) -> _RealArrayT: ...
+
+ def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019
+ def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019
+ def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019
+
+ # Binary ops
+
+ # TODO: Support the "1d @ 1d -> scalar" case
+ @overload
+ def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
+ @overload
+ def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+ @overload
+ def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
+ @overload
+ def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+ @overload
+ def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
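+ # For example (runtime behavior, on a 64-bit build): "1d @ 1d" produces a
+ # 0D scalar, which the overloads above do not yet model (see the TODO):
+ # >>> np.array([1, 2]) @ np.array([3, 4])
+ # np.int64(11)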
+
+ @overload # signature equivalent to __matmul__
+ def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
+ @overload
+ def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
+ @overload
+ def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+ @overload
+ def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+ @overload
+ def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+ @overload
+ def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+ @overload
+ def __mod__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload # signature equivalent to __mod__
+ def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+ @overload
+ def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+ @overload
+ def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
+ @overload
+ def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ...
+ @overload
+ def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ...
+ @overload
+ def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ...
+ @overload
+ def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
+ @overload # signature equivalent to __divmod__
+ def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
+ @overload
+ def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ...
+ @overload
+ def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ...
+ @overload
+ def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ...
+ @overload
+ def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
+ @overload
+ def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload
+ def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+ @overload
+ def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap]
+ @overload
+ def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
+ @overload
+ def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
+ @overload
+ def __add__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ...
+ @overload
+ def __add__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ...
+ @overload
+ def __add__(
+ self: ndarray[Any, dtypes.StringDType],
+ other: _ArrayLikeStr_co | _ArrayLikeString_co,
+ /,
+ ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ...
+ @overload
+ def __add__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload # signature equivalent to __add__
+ def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload
+ def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+ @overload
+ def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap]
+ @overload
+ def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
+ @overload
+ def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
+ @overload
+ def __radd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ...
+ @overload
+ def __radd__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ...
+ @overload
+ def __radd__(
+ self: ndarray[Any, dtypes.StringDType],
+ other: _ArrayLikeStr_co | _ArrayLikeString_co,
+ /,
+ ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ...
+ @overload
+ def __radd__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload
+ def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ...
+ @overload
+ def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap]
+ @overload
+ def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
+ @overload
+ def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __sub__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
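+ # For example (runtime behavior): boolean subtraction raises at runtime
+ # (hence the `NoReturn` overload), and datetime64 - datetime64 yields
+ # timedelta64:
+ # >>> np.array([True]) - np.array([False])  # TypeError: use `^` instead
+ # >>> np.datetime64("2025-01-02") - np.datetime64("2025-01-01")
+ # np.timedelta64(1,'D')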
+
+ @overload
+ def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload
+ def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ...
+ @overload
+ def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
+ @overload
+ def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __rsub__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload
+ def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+ @overload
+ def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+ @overload
+ def __mul__(
+ self: ndarray[Any, dtype[character] | dtypes.StringDType],
+ other: _ArrayLikeInt,
+ /,
+ ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ...
+ @overload
+ def __mul__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload # signature equivalent to __mul__
+ def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload
+ def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+ @overload
+ def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmul__(
+ self: ndarray[Any, dtype[character] | dtypes.StringDType],
+ other: _ArrayLikeInt,
+ /,
+ ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ...
+ @overload
+ def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+ @overload
+ def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ...
+ @overload
+ def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ...
+ @overload
+ def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ...
+ @overload
+ def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ...
+ @overload
+ def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
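+ # For example (runtime behavior): timedelta64 / timedelta64 yields float64,
+ # while timedelta64 / number stays timedelta64, as the overloads encode:
+ # >>> np.array([6], dtype="m8[h]") / np.array([2], dtype="m8[h]")
+ # array([3.])
+ # >>> np.array([6], dtype="m8[h]") / 2
+ # array([3], dtype='timedelta64[h]')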
+
+ @overload
+ def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+ @overload
+ def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ...
+ @overload
+ def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ...
+ @overload
+ def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ...
+ @overload
+ def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ...
+ @overload
+ def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+ @overload
+ def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ...
+ @overload
+ def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+ @overload
+ def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+ @overload
+ def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+ @overload
+ def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ...
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ...
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
+ @overload
+ def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
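+ # For example (runtime behavior): floor division of two timedelta64 arrays
+ # yields int64, in contrast to `/` above, which yields float64:
+ # >>> np.array([7], dtype="m8[h]") // np.array([2], dtype="m8[h]")
+ # array([3])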
+
+ @overload
+ def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+ @overload
+ def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ...
+ @overload
+ def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+ @overload
+ def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload
+ def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+ @overload
+ def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ...
+ @overload
+ def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ...
+ @overload
+ def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ...
+ @overload
+ def __pow__(
+ self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, /
+ ) -> NDArray[complex128]: ...
+ @overload
+ def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ...
+ @overload
+ def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ...
+ @overload
+ def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ...
+ @overload
+ def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ...
+
+ @overload
+ def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload
+ def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ...
+ @overload
+ def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ...
+ @overload
+ def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ...
+ @overload
+ def __rpow__(
+ self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, /
+ ) -> NDArray[complex128]: ...
+ @overload
+ def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ...
+ @overload
+ def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ...
+ @overload
+ def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ...
+ @overload
+ def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ...
+
+ @overload
+ def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
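+ # For example (runtime behavior): shifting boolean arrays promotes to int8,
+ # hence the `NDArray[int8]` overload above:
+ # >>> np.array([True]) << np.array([True])
+ # array([2], dtype=int8)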
+
+ @overload
+ def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc]
+ @overload
+ def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __and__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc]
+ @overload
+ def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __rand__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc]
+ @overload
+ def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __xor__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc]
+ @overload
+ def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc]
+ @overload
+ def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __or__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc]
+ @overload
+ def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+ @overload
+ def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+ @overload
+ def __ror__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ # `np.generic` does not support inplace operations
+
+ # NOTE: Inplace ops generally use "same_kind" casting w.r.t. the left
+ # operand. An exception to this rule is unsigned integers, which also
+ # accept a signed integer for the right operand, as long as it is a 0D
+ # object and its value is >= 0.
+ # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]`
+ # won't work, as this would lead to false negatives when using these inplace ops.
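+ # For example (runtime behavior), with a uint8 array:
+ # >>> a = np.array([1, 2], dtype=np.uint8)
+ # >>> a += 1    # OK: 0D signed integer with a value >= 0
+ # >>> a += 1.0  # TypeError: cannot cast float64 to uint8 under "same_kind"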
+ @overload
+ def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iadd__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iadd__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iadd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iadd__(
+ self: ndarray[Any, dtype[str_] | dtypes.StringDType],
+ other: _ArrayLikeStr_co | _ArrayLikeString_co,
+ /,
+ ) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ #
+ @overload
+ def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __isub__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __isub__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ #
+ @overload
+ def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imul__(
+ self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, /
+ ) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ @overload
+ def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ipow__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ #
+ @overload
+ def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ # keep in sync with `__imod__`
+ @overload
+ def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ # keep in sync with `__ifloordiv__`
+ @overload
+ def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imod__(
+ self: NDArray[timedelta64],
+ other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]],
+ /,
+ ) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ # keep in sync with `__irshift__`
+ @overload
+ def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ # keep in sync with `__ilshift__`
+ @overload
+ def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ # keep in sync with `__ixor__` and `__ior__`
+ @overload
+ def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ # keep in sync with `__iand__` and `__ior__`
+ @overload
+ def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ # keep in sync with `__iand__` and `__ixor__`
+ @overload
+ def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ #
+ @overload
+ def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imatmul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+
+ #
+ def __dlpack__(
+ self: NDArray[number],
+ /,
+ *,
+ stream: int | Any | None = None,
+ max_version: tuple[int, int] | None = None,
+ dl_device: tuple[int, int] | None = None,
+ copy: builtins.bool | None = None,
+ ) -> CapsuleType: ...
+ def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ...
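+ # For example (runtime behavior): the DLPack round trip is zero-copy on CPU,
+ # and the device tuple is always `(1, 0)` == (kDLCPU, device id 0), as the
+ # literal return type above encodes:
+ # >>> a = np.arange(4.0)
+ # >>> a.__dlpack_device__()
+ # (1, 0)
+ # >>> np.shares_memory(a, np.from_dlpack(a))
+ # True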
+
+ # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
+ @property
+ def dtype(self) -> _DTypeT_co: ...
+
+# NOTE: while `np.generic` is not technically an instance of `ABCMeta`,
+# the `@abstractmethod` decorator is herein used to (forcefully) deny
+# the creation of `np.generic` instances.
+# The `# type: ignore` comments are necessary to silence mypy errors regarding
+# the missing `ABCMeta` metaclass.
+# See https://github.com/numpy/numpy-stubs/pull/80 for more details.
+class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]):
+ @abstractmethod
+ def __init__(self, *args: Any, **kwargs: Any) -> None: ...
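+ # For example (runtime behavior): only the concrete subclasses can be
+ # instantiated, which is what the abstract `__init__` above denies statically:
+ # >>> np.generic()   # TypeError
+ # >>> np.float64(1.0)
+ # np.float64(1.0)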
+ def __hash__(self) -> int: ...
+ @overload
+ def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ...
+ @overload
+ def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ...
+ if sys.version_info >= (3, 12):
+ def __buffer__(self, flags: int, /) -> memoryview: ...
+
+ @property
+ def base(self) -> None: ...
+ @property
+ def ndim(self) -> L[0]: ...
+ @property
+ def size(self) -> L[1]: ...
+ @property
+ def shape(self) -> tuple[()]: ...
+ @property
+ def strides(self) -> tuple[()]: ...
+ @property
+ def flat(self) -> flatiter[ndarray[tuple[int], dtype[Self]]]: ...
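+ # For example (runtime behavior): scalars report 0D metadata consistent
+ # with the literal-typed properties above:
+ # >>> x = np.float64(1.0)
+ # >>> x.ndim, x.shape, x.size, x.base
+ # (0, (), 1, None)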
+
+ @overload
+ def item(self, /) -> _ItemT_co: ...
+ @overload
+ def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ...
+ def tolist(self, /) -> _ItemT_co: ...
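+ # For example (runtime behavior): `item` and `tolist` both unwrap the
+ # scalar into the corresponding builtin object:
+ # >>> np.float64(0.5).item()
+ # 0.5
+ # >>> np.int64(3).tolist()
+ # 3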
+
+ def byteswap(self, inplace: L[False] = ...) -> Self: ...
+
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarT],
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: builtins.bool = ...,
+ copy: builtins.bool | _CopyMode = ...,
+ ) -> _ScalarT: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: builtins.bool = ...,
+ copy: builtins.bool | _CopyMode = ...,
+ ) -> Any: ...
+
+ # NOTE: `view` will perform a 0D->scalar cast,
+ # thus the array `type` is irrelevant to the output type
+ @overload
+ def view(self, type: type[NDArray[Any]] = ...) -> Self: ...
+ @overload
+ def view(
+ self,
+ dtype: _DTypeLike[_ScalarT],
+ type: type[NDArray[Any]] = ...,
+ ) -> _ScalarT: ...
+ @overload
+ def view(
+ self,
+ dtype: DTypeLike,
+ type: type[NDArray[Any]] = ...,
+ ) -> Any: ...
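+ # For example (runtime behavior): `view` reinterprets the scalar's bytes as
+ # another scalar type, i.e. the 0D->scalar cast described in the NOTE above:
+ # >>> np.float64(1.0).view(np.int64)  # IEEE 754 bit pattern of 1.0
+ # np.int64(4607182418800017408)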
+
+ @overload
+ def getfield(
+ self,
+ dtype: _DTypeLike[_ScalarT],
+ offset: SupportsIndex = ...
+ ) -> _ScalarT: ...
+ @overload
+ def getfield(
+ self,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...
+ ) -> Any: ...
+
+ @overload
+ def take( # type: ignore[misc]
+ self,
+ indices: _IntLike_co,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> Self: ...
+ @overload
+ def take( # type: ignore[misc]
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> NDArray[Self]: ...
+ @overload
+ def take(
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+ out: _ArrayT = ...,
+ mode: _ModeKind = ...,
+ ) -> _ArrayT: ...
+
+ def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ...
+ def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ...
+ def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ...
+
+ @overload # (() | [])
+ def reshape(
+ self,
+ shape: tuple[()] | list[Never],
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> Self: ...
+ @overload # ((1, *(1, ...))@_ShapeT)
+ def reshape(
+ self,
+ shape: _1NShapeT,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[_1NShapeT, dtype[Self]]: ...
+ @overload # (Sequence[index, ...]) # not recommended
+ def reshape(
+ self,
+ shape: Sequence[SupportsIndex],
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ...
+ @overload # _(index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[L[1]], dtype[Self]]: ...
+ @overload # _(index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ...
+ @overload # _(index, index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ size3: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ...
+ @overload # _(index, index, index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ size3: SupportsIndex,
+ size4: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ...
+ @overload # _(index, index, index, index, index, *index) # ndim >= 5
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ size3: SupportsIndex,
+ size4: SupportsIndex,
+ size5: SupportsIndex,
+ /,
+ *sizes6_: SupportsIndex,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ...
+
+ def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ...
+ def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ...
+
+ @overload
+ def all(
+ self,
+ /,
+ axis: L[0, -1] | tuple[()] | None = None,
+ out: None = None,
+ keepdims: SupportsIndex = False,
+ *,
+ where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True
+ ) -> np.bool: ...
+ @overload
+ def all(
+ self,
+ /,
+ axis: L[0, -1] | tuple[()] | None,
+ out: ndarray[tuple[()], dtype[_ScalarT]],
+ keepdims: SupportsIndex = False,
+ *,
+ where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True,
+ ) -> _ScalarT: ...
+ @overload
+ def all(
+ self,
+ /,
+ axis: L[0, -1] | tuple[()] | None = None,
+ *,
+ out: ndarray[tuple[()], dtype[_ScalarT]],
+ keepdims: SupportsIndex = False,
+ where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True,
+ ) -> _ScalarT: ...
+
+ @overload
+ def any(
+ self,
+ /,
+ axis: L[0, -1] | tuple[()] | None = None,
+ out: None = None,
+ keepdims: SupportsIndex = False,
+ *,
+ where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True
+ ) -> np.bool: ...
+ @overload
+ def any(
+ self,
+ /,
+ axis: L[0, -1] | tuple[()] | None,
+ out: ndarray[tuple[()], dtype[_ScalarT]],
+ keepdims: SupportsIndex = False,
+ *,
+ where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True,
+ ) -> _ScalarT: ...
+ @overload
+ def any(
+ self,
+ /,
+ axis: L[0, -1] | tuple[()] | None = None,
+ *,
+ out: ndarray[tuple[()], dtype[_ScalarT]],
+ keepdims: SupportsIndex = False,
+ where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True,
+ ) -> _ScalarT: ...
+
+ # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
+ @property
+ def dtype(self) -> _dtype[Self]: ...
+
+class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]):
+ @abstractmethod
+ def __init__(self, value: _NumberItemT_co, /) -> None: ...
+ def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
+
+ def __neg__(self) -> Self: ...
+ def __pos__(self) -> Self: ...
+ def __abs__(self) -> Self: ...
+
+ __add__: _NumberOp
+ __radd__: _NumberOp
+ __sub__: _NumberOp
+ __rsub__: _NumberOp
+ __mul__: _NumberOp
+ __rmul__: _NumberOp
+ __floordiv__: _NumberOp
+ __rfloordiv__: _NumberOp
+ __pow__: _NumberOp
+ __rpow__: _NumberOp
+ __truediv__: _NumberOp
+ __rtruediv__: _NumberOp
+
+ __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co]
+ __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co]
+ __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co]
+ __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co]
+
+class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]):
+ @property
+ def itemsize(self) -> L[1]: ...
+ @property
+ def nbytes(self) -> L[1]: ...
+ @property
+ def real(self) -> Self: ...
+ @property
+ def imag(self) -> np.bool[L[False]]: ...
+
+ @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245
+ def __init__(self: np.bool[builtins.bool], value: Never, /) -> None: ...
+ @overload
+ def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ...
+ @overload
+ def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ...
+ @overload
+ def __init__(self: np.bool[builtins.bool], value: object, /) -> None: ...
+
+ def __bool__(self, /) -> _BoolItemT_co: ...
+
+ @overload
+ def __int__(self: np.bool[L[False]], /) -> L[0]: ...
+ @overload
+ def __int__(self: np.bool[L[True]], /) -> L[1]: ...
+ @overload
+ def __int__(self, /) -> L[0, 1]: ...
+
+ def __abs__(self) -> Self: ...
+
+ @overload
+ def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ...
+ @overload
+ def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ...
+ @overload
+ def __invert__(self, /) -> np.bool: ...
+
+ @overload
+ def __add__(self, other: _NumberT, /) -> _NumberT: ...
+ @overload
+ def __add__(self, other: builtins.bool | bool_, /) -> bool_: ...
+ @overload
+ def __add__(self, other: int, /) -> int_: ...
+ @overload
+ def __add__(self, other: float, /) -> float64: ...
+ @overload
+ def __add__(self, other: complex, /) -> complex128: ...
+
+ @overload
+ def __radd__(self, other: _NumberT, /) -> _NumberT: ...
+ @overload
+ def __radd__(self, other: builtins.bool, /) -> bool_: ...
+ @overload
+ def __radd__(self, other: int, /) -> int_: ...
+ @overload
+ def __radd__(self, other: float, /) -> float64: ...
+ @overload
+ def __radd__(self, other: complex, /) -> complex128: ...
+
+ @overload
+ def __sub__(self, other: _NumberT, /) -> _NumberT: ...
+ @overload
+ def __sub__(self, other: int, /) -> int_: ...
+ @overload
+ def __sub__(self, other: float, /) -> float64: ...
+ @overload
+ def __sub__(self, other: complex, /) -> complex128: ...
+
+ @overload
+ def __rsub__(self, other: _NumberT, /) -> _NumberT: ...
+ @overload
+ def __rsub__(self, other: int, /) -> int_: ...
+ @overload
+ def __rsub__(self, other: float, /) -> float64: ...
+ @overload
+ def __rsub__(self, other: complex, /) -> complex128: ...
+
+ @overload
+ def __mul__(self, other: _NumberT, /) -> _NumberT: ...
+ @overload
+ def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ...
+ @overload
+ def __mul__(self, other: int, /) -> int_: ...
+ @overload
+ def __mul__(self, other: float, /) -> float64: ...
+ @overload
+ def __mul__(self, other: complex, /) -> complex128: ...
+
+ @overload
+ def __rmul__(self, other: _NumberT, /) -> _NumberT: ...
+ @overload
+ def __rmul__(self, other: builtins.bool, /) -> bool_: ...
+ @overload
+ def __rmul__(self, other: int, /) -> int_: ...
+ @overload
+ def __rmul__(self, other: float, /) -> float64: ...
+ @overload
+ def __rmul__(self, other: complex, /) -> complex128: ...
+
+ @overload
+ def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ...
+ @overload
+ def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ...
+ @overload
+ def __pow__(self, other: int, mod: None = None, /) -> int_: ...
+ @overload
+ def __pow__(self, other: float, mod: None = None, /) -> float64: ...
+ @overload
+ def __pow__(self, other: complex, mod: None = None, /) -> complex128: ...
+
+ @overload
+ def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ...
+ @overload
+ def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ...
+ @overload
+ def __rpow__(self, other: int, mod: None = None, /) -> int_: ...
+ @overload
+ def __rpow__(self, other: float, mod: None = None, /) -> float64: ...
+ @overload
+ def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ...
+
+ @overload
+ def __truediv__(self, other: _InexactT, /) -> _InexactT: ...
+ @overload
+ def __truediv__(self, other: float | integer | bool_, /) -> float64: ...
+ @overload
+ def __truediv__(self, other: complex, /) -> complex128: ...
+
+ @overload
+ def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ...
+ @overload
+ def __rtruediv__(self, other: float | integer, /) -> float64: ...
+ @overload
+ def __rtruediv__(self, other: complex, /) -> complex128: ...
+
+ @overload
+ def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ...
+ @overload
+ def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ...
+ @overload
+ def __floordiv__(self, other: int, /) -> int_: ...
+ @overload
+ def __floordiv__(self, other: float, /) -> float64: ...
+
+ @overload
+ def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ...
+ @overload
+ def __rfloordiv__(self, other: builtins.bool, /) -> int8: ...
+ @overload
+ def __rfloordiv__(self, other: int, /) -> int_: ...
+ @overload
+ def __rfloordiv__(self, other: float, /) -> float64: ...
+
+ # keep in sync with __floordiv__
+ @overload
+ def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ...
+ @overload
+ def __mod__(self, other: builtins.bool | bool_, /) -> int8: ...
+ @overload
+ def __mod__(self, other: int, /) -> int_: ...
+ @overload
+ def __mod__(self, other: float, /) -> float64: ...
+
+ # keep in sync with __rfloordiv__
+ @overload
+ def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ...
+ @overload
+ def __rmod__(self, other: builtins.bool, /) -> int8: ...
+ @overload
+ def __rmod__(self, other: int, /) -> int_: ...
+ @overload
+ def __rmod__(self, other: float, /) -> float64: ...
+
+ # keep in sync with __mod__
+ @overload
+ def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ...
+ @overload
+ def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ...
+ @overload
+ def __divmod__(self, other: int, /) -> _2Tuple[int_]: ...
+ @overload
+ def __divmod__(self, other: float, /) -> _2Tuple[float64]: ...
+
+ # keep in sync with __rmod__
+ @overload
+ def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ...
+ @overload
+ def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ...
+ @overload
+ def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ...
+ @overload
+ def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ...
+
+ @overload
+ def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ...
+ @overload
+ def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ...
+ @overload
+ def __lshift__(self, other: int, /) -> int_: ...
+
+ @overload
+ def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ...
+ @overload
+ def __rlshift__(self, other: builtins.bool, /) -> int8: ...
+ @overload
+ def __rlshift__(self, other: int, /) -> int_: ...
+
+ # keep in sync with __lshift__
+ @overload
+ def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ...
+ @overload
+ def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ...
+ @overload
+ def __rshift__(self, other: int, /) -> int_: ...
+
+ # keep in sync with __rlshift__
+ @overload
+ def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ...
+ @overload
+ def __rrshift__(self, other: builtins.bool, /) -> int8: ...
+ @overload
+ def __rrshift__(self, other: int, /) -> int_: ...
+
+ @overload
+ def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ...
+ @overload
+ def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ...
+ @overload
+ def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ...
+ @overload
+ def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ...
+ @overload
+ def __and__(self, other: _IntegerT, /) -> _IntegerT: ...
+ @overload
+ def __and__(self, other: int, /) -> np.bool | intp: ...
+ __rand__ = __and__
+
+ @overload
+ def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ...
+ @overload
+ def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ...
+ @overload
+ def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ...
+ @overload
+ def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ...
+ @overload
+ def __xor__(self, other: _IntegerT, /) -> _IntegerT: ...
+ @overload
+ def __xor__(self, other: int, /) -> np.bool | intp: ...
+ __rxor__ = __xor__
+
+ @overload
+ def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ...
+ @overload
+ def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ...
+ @overload
+ def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ...
+ @overload
+ def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ...
+ @overload
+ def __or__(self, other: _IntegerT, /) -> _IntegerT: ...
+ @overload
+ def __or__(self, other: int, /) -> np.bool | intp: ...
+ __ror__ = __or__
+
+ __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co]
+ __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co]
+ __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co]
+ __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co]
+
+# NOTE: This should _not_ be `Final` or a `TypeAlias`
+bool_ = bool
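+# (illustration: at runtime `np.bool_ is np.bool` evaluates to `True`)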
+
+# NOTE: The `object_` constructor returns the passed object, so instances with type
+# `object_` cannot exist (at runtime).
+# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't
+# be made generic.
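+# (illustration of the pass-through behavior: `np.object_() is None` and
+# `type(np.object_(42)) is int` both hold at runtime, i.e. the constructor is
+# effectively an identity function)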
+@final
+class object_(_RealMixin, generic):
+ @overload
+ def __new__(cls, nothing_to_see_here: None = None, /) -> None: ... # type: ignore[misc]
+ @overload
+ def __new__(cls, stringy: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc]
+ @overload
+ def __new__(cls, array: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc]
+ @overload
+ def __new__(cls, sequence: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc]
+ @overload
+ def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc]
+ @overload # catch-all
+ def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc]
+ def __init__(self, value: object = ..., /) -> None: ...
+ def __hash__(self, /) -> int: ...
+ def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__
+ def __call__(self, /, *args: object, **kwargs: object) -> Any: ...
+
+ if sys.version_info >= (3, 12):
+ def __release_buffer__(self, buffer: memoryview, /) -> None: ...
+
+class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]):
+ @abstractmethod
+ def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ...
+
+ # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes
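+ # (e.g. `np.uint8(255).bit_count()` evaluates to `8`, mirroring `int.bit_count`)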
+ def bit_count(self, /) -> int: ...
+ def __index__(self, /) -> int: ...
+ def __invert__(self, /) -> Self: ...
+
+ __truediv__: _IntTrueDiv[_NBit]
+ __rtruediv__: _IntTrueDiv[_NBit]
+ def __mod__(self, value: _IntLike_co, /) -> integer: ...
+ def __rmod__(self, value: _IntLike_co, /) -> integer: ...
+ # Ensure that objects annotated as `integer` support bit-wise operations
+ def __lshift__(self, other: _IntLike_co, /) -> integer: ...
+ def __rlshift__(self, other: _IntLike_co, /) -> integer: ...
+ def __rshift__(self, other: _IntLike_co, /) -> integer: ...
+ def __rrshift__(self, other: _IntLike_co, /) -> integer: ...
+ def __and__(self, other: _IntLike_co, /) -> integer: ...
+ def __rand__(self, other: _IntLike_co, /) -> integer: ...
+ def __or__(self, other: _IntLike_co, /) -> integer: ...
+ def __ror__(self, other: _IntLike_co, /) -> integer: ...
+ def __xor__(self, other: _IntLike_co, /) -> integer: ...
+ def __rxor__(self, other: _IntLike_co, /) -> integer: ...
+
+class signedinteger(integer[_NBit1]):
+ def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ...
+
+ __add__: _SignedIntOp[_NBit1]
+ __radd__: _SignedIntOp[_NBit1]
+ __sub__: _SignedIntOp[_NBit1]
+ __rsub__: _SignedIntOp[_NBit1]
+ __mul__: _SignedIntOp[_NBit1]
+ __rmul__: _SignedIntOp[_NBit1]
+ __floordiv__: _SignedIntOp[_NBit1]
+ __rfloordiv__: _SignedIntOp[_NBit1]
+ __pow__: _SignedIntOp[_NBit1]
+ __rpow__: _SignedIntOp[_NBit1]
+ __lshift__: _SignedIntBitOp[_NBit1]
+ __rlshift__: _SignedIntBitOp[_NBit1]
+ __rshift__: _SignedIntBitOp[_NBit1]
+ __rrshift__: _SignedIntBitOp[_NBit1]
+ __and__: _SignedIntBitOp[_NBit1]
+ __rand__: _SignedIntBitOp[_NBit1]
+ __xor__: _SignedIntBitOp[_NBit1]
+ __rxor__: _SignedIntBitOp[_NBit1]
+ __or__: _SignedIntBitOp[_NBit1]
+ __ror__: _SignedIntBitOp[_NBit1]
+ __mod__: _SignedIntMod[_NBit1]
+ __rmod__: _SignedIntMod[_NBit1]
+ __divmod__: _SignedIntDivMod[_NBit1]
+ __rdivmod__: _SignedIntDivMod[_NBit1]
+
+int8: TypeAlias = signedinteger[_8Bit]
+int16: TypeAlias = signedinteger[_16Bit]
+int32: TypeAlias = signedinteger[_32Bit]
+int64: TypeAlias = signedinteger[_64Bit]
+
+byte: TypeAlias = signedinteger[_NBitByte]
+short: TypeAlias = signedinteger[_NBitShort]
+intc: TypeAlias = signedinteger[_NBitIntC]
+intp: TypeAlias = signedinteger[_NBitIntP]
+int_: TypeAlias = intp
+long: TypeAlias = signedinteger[_NBitLong]
+longlong: TypeAlias = signedinteger[_NBitLongLong]
+
+class unsignedinteger(integer[_NBit1]):
+ # NOTE: `uint64 + signedinteger -> float64`
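+ # (e.g. `np.uint64(1) + np.int64(1)` evaluates to `np.float64(2.0)` at runtime,
+ # since no fixed-width integer type can represent both operands' full ranges)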
+ def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ...
+
+ __add__: _UnsignedIntOp[_NBit1]
+ __radd__: _UnsignedIntOp[_NBit1]
+ __sub__: _UnsignedIntOp[_NBit1]
+ __rsub__: _UnsignedIntOp[_NBit1]
+ __mul__: _UnsignedIntOp[_NBit1]
+ __rmul__: _UnsignedIntOp[_NBit1]
+ __floordiv__: _UnsignedIntOp[_NBit1]
+ __rfloordiv__: _UnsignedIntOp[_NBit1]
+ __pow__: _UnsignedIntOp[_NBit1]
+ __rpow__: _UnsignedIntOp[_NBit1]
+ __lshift__: _UnsignedIntBitOp[_NBit1]
+ __rlshift__: _UnsignedIntBitOp[_NBit1]
+ __rshift__: _UnsignedIntBitOp[_NBit1]
+ __rrshift__: _UnsignedIntBitOp[_NBit1]
+ __and__: _UnsignedIntBitOp[_NBit1]
+ __rand__: _UnsignedIntBitOp[_NBit1]
+ __xor__: _UnsignedIntBitOp[_NBit1]
+ __rxor__: _UnsignedIntBitOp[_NBit1]
+ __or__: _UnsignedIntBitOp[_NBit1]
+ __ror__: _UnsignedIntBitOp[_NBit1]
+ __mod__: _UnsignedIntMod[_NBit1]
+ __rmod__: _UnsignedIntMod[_NBit1]
+ __divmod__: _UnsignedIntDivMod[_NBit1]
+ __rdivmod__: _UnsignedIntDivMod[_NBit1]
+
+uint8: TypeAlias = unsignedinteger[_8Bit]
+uint16: TypeAlias = unsignedinteger[_16Bit]
+uint32: TypeAlias = unsignedinteger[_32Bit]
+uint64: TypeAlias = unsignedinteger[_64Bit]
+
+ubyte: TypeAlias = unsignedinteger[_NBitByte]
+ushort: TypeAlias = unsignedinteger[_NBitShort]
+uintc: TypeAlias = unsignedinteger[_NBitIntC]
+uintp: TypeAlias = unsignedinteger[_NBitIntP]
+uint: TypeAlias = uintp
+ulong: TypeAlias = unsignedinteger[_NBitLong]
+ulonglong: TypeAlias = unsignedinteger[_NBitLongLong]
+
+class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]):
+ @abstractmethod
+ def __init__(self, value: _InexactItemT_co | None = ..., /) -> None: ...
+
+class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]):
+ def __init__(self, value: _ConvertibleToFloat | None = ..., /) -> None: ...
+
+ __add__: _FloatOp[_NBit1]
+ __radd__: _FloatOp[_NBit1]
+ __sub__: _FloatOp[_NBit1]
+ __rsub__: _FloatOp[_NBit1]
+ __mul__: _FloatOp[_NBit1]
+ __rmul__: _FloatOp[_NBit1]
+ __truediv__: _FloatOp[_NBit1]
+ __rtruediv__: _FloatOp[_NBit1]
+ __floordiv__: _FloatOp[_NBit1]
+ __rfloordiv__: _FloatOp[_NBit1]
+ __pow__: _FloatOp[_NBit1]
+ __rpow__: _FloatOp[_NBit1]
+ __mod__: _FloatMod[_NBit1]
+ __rmod__: _FloatMod[_NBit1]
+ __divmod__: _FloatDivMod[_NBit1]
+ __rdivmod__: _FloatDivMod[_NBit1]
+
+ # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes
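+ # (e.g. `np.float64(2.0).is_integer()` is `True` and
+ # `np.float64(0.5).as_integer_ratio()` is `(1, 2)`, matching `builtins.float`)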
+ def is_integer(self, /) -> builtins.bool: ...
+ def as_integer_ratio(self, /) -> tuple[int, int]: ...
+
+float16: TypeAlias = floating[_16Bit]
+float32: TypeAlias = floating[_32Bit]
+
+# either a C `double`, `float`, or `longdouble`
+class float64(floating[_64Bit], float): # type: ignore[misc]
+ def __new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ...
+
+ #
+ @property
+ def itemsize(self) -> L[8]: ...
+ @property
+ def nbytes(self) -> L[8]: ...
+
+ # overrides for `floating` and `builtins.float` compatibility (`_RealMixin` doesn't work)
+ @property
+ def real(self) -> Self: ...
+ @property
+ def imag(self) -> Self: ...
+ def conjugate(self) -> Self: ...
+ def __getformat__(self, typestr: L["double", "float"], /) -> str: ...
+ def __getnewargs__(self, /) -> tuple[float]: ...
+
+ # float64-specific operator overrides
+ @overload
+ def __add__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __add__(self, other: complex, /) -> float64 | complex128: ...
+ @overload
+ def __radd__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __radd__(self, other: complex, /) -> float64 | complex128: ...
+
+ @overload
+ def __sub__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __sub__(self, other: complex, /) -> float64 | complex128: ...
+ @overload
+ def __rsub__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __rsub__(self, other: complex, /) -> float64 | complex128: ...
+
+ @overload
+ def __mul__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __mul__(self, other: complex, /) -> float64 | complex128: ...
+ @overload
+ def __rmul__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __rmul__(self, other: complex, /) -> float64 | complex128: ...
+
+ @overload
+ def __truediv__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __truediv__(self, other: complex, /) -> float64 | complex128: ...
+ @overload
+ def __rtruediv__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __rtruediv__(self, other: complex, /) -> float64 | complex128: ...
+
+ @overload
+ def __floordiv__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __floordiv__(self, other: complex, /) -> float64 | complex128: ...
+ @overload
+ def __rfloordiv__(self, other: _Float64_co, /) -> float64: ...
+ @overload
+ def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+ @overload
+ def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ...
+
+ @overload
+ def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ...
+ @overload
+ def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ...
+ @overload
+ def __pow__(
+ self, other: complexfloating[_NBit1, _NBit2], mod: None = None, /
+ ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ...
+ @overload
+ def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ...
+ @overload
+ def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ...
+ @overload
+ def __rpow__(
+ self, other: complexfloating[_NBit1, _NBit2], mod: None = None, /
+ ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ @overload
+ def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ...
+
+ def __mod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override]
+ def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override]
+
+ def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override]
+ def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override]
+
+half: TypeAlias = floating[_NBitHalf]
+single: TypeAlias = floating[_NBitSingle]
+double: TypeAlias = floating[_NBitDouble]
+longdouble: TypeAlias = floating[_NBitLongDouble]
+
+# The main reason for `complexfloating` having two typevars is cosmetic:
+# they clarify why `complex128`'s precision is `_64Bit`, the latter describing
+# the two 64-bit floats that represent its real and imaginary components.
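+# (e.g. `real` and `imag` below return `floating[_NBit1]` and `floating[_NBit2]`,
+# so `np.complex64(1 + 2j).real` is a `float32` at runtime)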
+
+class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]):
+ @overload
+ def __init__(
+ self,
+ real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ...,
+ imag: complex | SupportsFloat | SupportsIndex = ...,
+ /,
+ ) -> None: ...
+ @overload
+ def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ...
+
+ @property
+ def real(self) -> floating[_NBit1]: ... # type: ignore[override]
+ @property
+ def imag(self) -> floating[_NBit2]: ... # type: ignore[override]
+
+ # NOTE: `__complex__` is technically defined in the concrete subtypes
+ def __complex__(self, /) -> complex: ...
+ def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override]
+
+ @overload
+ def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+ @overload
+ def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+ @overload
+ def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+ @overload
+ def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+ @overload
+ def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+ @overload
+ def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+ @overload
+ def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+ @overload
+ def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+ @overload
+ def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __pow__(
+ self, other: complex | float64 | complex128, mod: None = None, /
+ ) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __pow__(
+ self, other: number[_NBit], mod: None = None, /
+ ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+ @overload
+ def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload
+ def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload
+ def __rpow__(
+ self, other: number[_NBit], mod: None = None, /
+ ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+complex64: TypeAlias = complexfloating[_32Bit, _32Bit]
+
+class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc]
+ @overload
+ def __new__(
+ cls,
+ real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ...,
+ imag: complex | SupportsFloat | SupportsIndex = ...,
+ /,
+ ) -> Self: ...
+ @overload
+ def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ...
+
+ #
+ @property
+ def itemsize(self) -> L[16]: ...
+ @property
+ def nbytes(self) -> L[16]: ...
+
+ # overrides for `complexfloating` and `builtins.complex` compatibility
+ @property
+ def real(self) -> float64: ...
+ @property
+ def imag(self) -> float64: ...
+ def conjugate(self) -> Self: ...
+ def __abs__(self) -> float64: ... # type: ignore[override]
+ def __getnewargs__(self, /) -> tuple[float, float]: ...
+
+ # complex128-specific operator overrides
+ @overload
+ def __add__(self, other: _Complex128_co, /) -> complex128: ...
+ @overload
+ def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ def __radd__(self, other: _Complex128_co, /) -> complex128: ...
+
+ @overload
+ def __sub__(self, other: _Complex128_co, /) -> complex128: ...
+ @overload
+ def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ def __rsub__(self, other: _Complex128_co, /) -> complex128: ...
+
+ @overload
+ def __mul__(self, other: _Complex128_co, /) -> complex128: ...
+ @overload
+ def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ def __rmul__(self, other: _Complex128_co, /) -> complex128: ...
+
+ @overload
+ def __truediv__(self, other: _Complex128_co, /) -> complex128: ...
+ @overload
+ def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ...
+
+ @overload
+ def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ...
+ @overload
+ def __pow__(
+ self, other: complexfloating[_NBit1, _NBit2], mod: None = None, /
+ ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+ def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ...
+
+csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle]
+cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble]
+clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble]
+
+class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]):
+ @property
+ def itemsize(self) -> L[8]: ...
+ @property
+ def nbytes(self) -> L[8]: ...
+
+ @overload
+ def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ...
+ @overload
+ def __init__(self: timedelta64[L[0]], /) -> None: ...
+ @overload
+ def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ...
+ @overload
+ def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ...
+ @overload
+ def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ...
+ @overload
+ def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ...
+ @overload
+ def __init__(
+ self: timedelta64[dt.timedelta],
+ value: dt.timedelta | _IntLike_co,
+ format: _TimeUnitSpec[_NativeTD64Unit] = ...,
+ /,
+ ) -> None: ...
+ @overload
+ def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ...
+
+ # inherited at runtime from `signedinteger`
+ def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ...
+
+ # NOTE: Only a limited number of units support conversion
+ # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as`
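+ # (illustration: `int(np.timedelta64(1, "ns"))` is `1`; units such as `"s"` map
+ # to `dt.timedelta` items instead, hence the `timedelta64[int]` bound on `self`)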
+ def __int__(self: timedelta64[int], /) -> int: ...
+ def __float__(self: timedelta64[int], /) -> float: ...
+
+ def __neg__(self, /) -> Self: ...
+ def __pos__(self, /) -> Self: ...
+ def __abs__(self, /) -> Self: ...
+
+ @overload
+ def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ...
+ @overload
+ def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ...
+ @overload
+ def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ...
+ @overload
+ def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ...
+ @overload
+ def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ...
+ @overload
+ def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ...
+ __radd__ = __add__
+
+ @overload
+ def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ...
+ @overload
+ def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+ @overload
+ def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ...
+ __rmul__ = __mul__
+
+ @overload
+ def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ...
+ @overload
+ def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ...
+ @overload
+ def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ...
+ @overload
+ def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ...
+ @overload
+ def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ...
+ @overload
+ def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ...
+ @overload
+ def __mod__(self, x: timedelta64, /) -> timedelta64: ...
+
+ # the L[0] makes __mod__ non-commutative, which the first two overloads reflect
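+ # (illustration: `td % np.timedelta64(0, "s")` evaluates to `NaT`, while
+ # `np.timedelta64(0, "s") % td` is an ordinary zero-valued `timedelta64`)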
+ @overload
+ def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ...
+ @overload
+ def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ...
+ @overload
+ def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ...
+ @overload
+ def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ...
+ @overload
+ def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ...
+ @overload
+ def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ...
+ @overload
+ def __rmod__(self, x: timedelta64, /) -> timedelta64: ...
+
+ # keep in sync with __mod__
+ @overload
+ def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ...
+ @overload
+ def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ...
+ @overload
+ def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ...
+ @overload
+ def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ...
+ @overload
+ def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ...
+ @overload
+ def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ...
+ @overload
+ def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ...
+
+ # keep in sync with __rmod__
+ @overload
+ def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ...
+ @overload
+ def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ...
+ @overload
+ def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ...
+ @overload
+ def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ...
+ @overload
+ def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ...
+ @overload
+ def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ...
+ @overload
+ def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ...
+
+ @overload
+ def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ...
+ @overload
+ def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ...
+ @overload
+ def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ...
+ @overload
+ def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ...
+ @overload
+ def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ...
+ @overload
+ def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ...
+
+ @overload
+ def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ...
+ @overload
+ def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ...
+ @overload
+ def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ...
+ @overload
+ def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ...
+ @overload
+ def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ...
+ @overload
+ def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ...
+
+ @overload
+ def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ...
+ @overload
+ def __truediv__(self, b: timedelta64, /) -> float64: ...
+ @overload
+ def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ...
+ @overload
+ def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+ @overload
+ def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ...
+ @overload
+ def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ...
+ @overload
+ def __rtruediv__(self, a: timedelta64, /) -> float64: ...
+
+ @overload
+ def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ...
+ @overload
+ def __floordiv__(self, b: timedelta64, /) -> int64: ...
+ @overload
+ def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ...
+ @overload
+ def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+ @overload
+ def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ...
+ @overload
+ def __rfloordiv__(self, a: timedelta64, /) -> int64: ...
+
+ __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co]
+ __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co]
+ __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co]
+ __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co]
+
+class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
+ @property
+ def itemsize(self) -> L[8]: ...
+ @property
+ def nbytes(self) -> L[8]: ...
+
+ @overload
+ def __init__(self, value: datetime64[_DT64ItemT_co], /) -> None: ...
+ @overload
+ def __init__(self: datetime64[_AnyDT64Arg], value: _AnyDT64Arg, /) -> None: ...
+ @overload
+ def __init__(self: datetime64[None], value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> None: ...
+ @overload
+ def __init__(self: datetime64[dt.datetime], value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> None: ...
+ @overload
+ def __init__(self: datetime64[dt.date], value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> None: ...
+ @overload
+ def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ...
+ @overload
+ def __init__(
+ self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], /
+ ) -> None: ...
+ @overload
+ def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ...
+ @overload
+ def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ...
+
+ @overload
+ def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+ @overload
+ def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ...
+ @overload
+ def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ...
+ @overload
+ def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ...
+ @overload
+ def __add__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ...
+ @overload
+ def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ...
+ @overload
+ def __add__(self, x: datetime64[None], /) -> datetime64[None]: ...
+ @overload
+ def __add__(self, x: _TD64Like_co, /) -> datetime64: ...
+ __radd__ = __add__
+
+ @overload
+ def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+ @overload
+ def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ...
+ @overload
+ def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ...
+ @overload
+ def __sub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ...
+ @overload
+ def __sub__(self: datetime64[int], x: timedelta64, /) -> datetime64[int]: ...
+ @overload
+ def __sub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ...
+ @overload
+ def __sub__(self: datetime64[dt.datetime], x: timedelta64[int], /) -> datetime64[int]: ...
+ @overload
+ def __sub__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ...
+ @overload
+ def __sub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ...
+ @overload
+ def __sub__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[dt.date | int]: ...
+ @overload
+ def __sub__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ...
+ @overload
+ def __sub__(self: datetime64[dt.date], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ...
+ @overload
+ def __sub__(self, x: timedelta64[None], /) -> datetime64[None]: ...
+ @overload
+ def __sub__(self, x: datetime64[None], /) -> timedelta64[None]: ...
+ @overload
+ def __sub__(self, x: _TD64Like_co, /) -> datetime64: ...
+ @overload
+ def __sub__(self, x: datetime64, /) -> timedelta64: ...
+
+ @overload
+ def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+ @overload
+ def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ...
+ @overload
+ def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ...
+ @overload
+ def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ...
+ @overload
+ def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ...
+ @overload
+ def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ...
+ @overload
+ def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ...
+ @overload
+ def __rsub__(self, x: datetime64, /) -> timedelta64: ...
+
+ __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co]
+ __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co]
+ __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co]
+ __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co]
+
+class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ...
+
+class void(flexible[bytes | tuple[Any, ...]]):
+ @overload
+ def __init__(self, value: _IntLike_co | bytes, /, dtype: None = None) -> None: ...
+ @overload
+ def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ...
+
+ @overload
+ def __getitem__(self, key: str | SupportsIndex, /) -> Any: ...
+ @overload
+ def __getitem__(self, key: list[str], /) -> void: ...
+ def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ...
+
+ def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ...
+
+class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]):
+ @abstractmethod
+ def __init__(self, value: _CharacterItemT_co = ..., /) -> None: ...
+
+# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart
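+# (e.g. `type(np.str_("ab").upper()) is str`, because the methods inherited from
+# the builtin construct plain builtin instances)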
+
+class bytes_(character[bytes], bytes):
+ @overload
+ def __new__(cls, o: object = ..., /) -> Self: ...
+ @overload
+ def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ...
+
+ #
+ @overload
+ def __init__(self, o: object = ..., /) -> None: ...
+ @overload
+ def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ...
+
+ #
+ def __bytes__(self, /) -> bytes: ...
+
+class str_(character[str], str):
+ @overload
+ def __new__(cls, value: object = ..., /) -> Self: ...
+ @overload
+ def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ...
+
+ #
+ @overload
+ def __init__(self, value: object = ..., /) -> None: ...
+ @overload
+ def __init__(self, value: bytes, /, encoding: str = ..., errors: str = ...) -> None: ...
+
+# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
+@final
+class ufunc:
+ @property
+ def __name__(self) -> LiteralString: ...
+ @property
+ def __qualname__(self) -> LiteralString: ...
+ @property
+ def __doc__(self) -> str: ...
+ @property
+ def nin(self) -> int: ...
+ @property
+ def nout(self) -> int: ...
+ @property
+ def nargs(self) -> int: ...
+ @property
+ def ntypes(self) -> int: ...
+ @property
+ def types(self) -> list[LiteralString]: ...
+ # Broad return type because it has to encompass things like
+ #
+ # >>> np.logical_and.identity is True
+ # True
+ # >>> np.add.identity is 0
+ # True
+ # >>> np.sin.identity is None
+ # True
+ #
+ # and any user-defined ufuncs.
+ @property
+ def identity(self) -> Any: ...
+ # This is None for ufuncs and a string for gufuncs.
+ @property
+ def signature(self) -> LiteralString | None: ...
+
+ def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
+ # The next four methods will always exist, but they will just
+ # raise a ValueError for ufuncs that don't accept two input
+ # arguments and return one output argument. Because of that we
+ # can't type them very precisely.
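+ # (illustration: `np.add.reduce([1, 2, 3])` evaluates to `6`, whereas
+ # `np.sin.reduce([1.0])` raises `ValueError` since `np.sin` is not binary)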
+ def reduce(self, /, *args: Any, **kwargs: Any) -> Any: ...
+ def accumulate(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
+ def reduceat(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
+ def outer(self, *args: Any, **kwargs: Any) -> Any: ...
+ # Similarly, `at` won't be defined for ufuncs that return multiple
+ # outputs, so we can't type it very precisely.
+ def at(self, /, *args: Any, **kwargs: Any) -> None: ...
+
+ #
+ def resolve_dtypes(
+ self,
+ /,
+ dtypes: tuple[dtype | type | None, ...],
+ *,
+ signature: tuple[dtype | None, ...] | None = None,
+ casting: _CastingKind | None = None,
+ reduction: builtins.bool = False,
+ ) -> tuple[dtype, ...]: ...
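+ # (illustration, assuming NEP 50-style promotion: passing
+ # `(np.dtype(np.int32), np.dtype(np.float64), None)` to `np.add.resolve_dtypes`
+ # yields `(float64, float64, float64)`)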
+
+# Parameters: `__name__`, `ntypes`, `identity` (and `signature` for gufuncs)
+absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None]
+add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]]
+arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None]
+arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None]
+arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None]
+arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None]
+arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None]
+arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None]
+arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None]
+bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]]
+bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None]
+bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
+bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]]
+bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]]
+cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None]
+ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None]
+conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
+conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
+copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None]
+cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None]
+cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None]
+deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None]
+degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None]
+divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
+divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None]
+equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None]
+exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None]
+exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None]
+expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None]
+fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None]
+float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None]
+floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None]
+floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None]
+fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None]
+fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None]
+fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None]
+frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None]
+gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]]
+greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None]
+greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None]
+heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None]
+hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]]
+invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
+isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None]
+isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None]
+isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None]
+isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None]
+lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None]
+ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None]
+left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None]
+less: _UFunc_Nin2_Nout1[L['less'], L[23], None]
+less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None]
+log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None]
+log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None]
+log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None]
+log: _UFunc_Nin1_Nout1[L['log'], L[10], None]
+logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float]
+logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float]
+logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]]
+logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None]
+logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]]
+logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]]
+matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]]
+matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]]
+maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None]
+minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None]
+mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
+modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None]
+multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]]
+negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None]
+nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None]
+not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None]
+positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None]
+power: _UFunc_Nin2_Nout1[L['power'], L[18], None]
+rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None]
+radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None]
+reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None]
+remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
+right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None]
+rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None]
+sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None]
+signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None]
+sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None]
+sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None]
+spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None]
+sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None]
+square: _UFunc_Nin1_Nout1[L['square'], L[18], None]
+subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None]
+tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None]
+tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None]
+true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
+trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None]
+vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]]
+vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]]
+
+abs = absolute
+acos = arccos
+acosh = arccosh
+asin = arcsin
+asinh = arcsinh
+atan = arctan
+atanh = arctanh
+atan2 = arctan2
+concat = concatenate
+bitwise_left_shift = left_shift
+bitwise_invert = invert
+bitwise_right_shift = right_shift
+permute_dims = transpose
+pow = power
+
+class errstate:
+ def __init__(
+ self,
+ *,
+ call: _ErrCall = ...,
+ all: _ErrKind | None = ...,
+ divide: _ErrKind | None = ...,
+ over: _ErrKind | None = ...,
+ under: _ErrKind | None = ...,
+ invalid: _ErrKind | None = ...,
+ ) -> None: ...
+ def __enter__(self) -> None: ...
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ /,
+ ) -> None: ...
+ def __call__(self, func: _CallableT) -> _CallableT: ...
+
+# TODO: The type of each `__next__` and `iters` return-type depends
+# on the length and dtype of `args`; we can't describe this behavior yet
+# as we lack variadics (PEP 646).
+@final
+class broadcast:
+ def __new__(cls, *args: ArrayLike) -> broadcast: ...
+ @property
+ def index(self) -> int: ...
+ @property
+ def iters(self) -> tuple[flatiter[Any], ...]: ...
+ @property
+ def nd(self) -> int: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def numiter(self) -> int: ...
+ @property
+ def shape(self) -> _AnyShape: ...
+ @property
+ def size(self) -> int: ...
+ def __next__(self) -> tuple[Any, ...]: ...
+ def __iter__(self) -> Self: ...
+ def reset(self) -> None: ...
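+
+# Illustrative sketch: `broadcast` reports the shape that its operands
+# broadcast to without allocating a result array:
+#
+#     b = np.broadcast(np.empty((3, 1)), np.empty((1, 4)))
+#     assert b.shape == (3, 4) and b.ndim == 2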
+
+@final
+class busdaycalendar:
+ def __new__(
+ cls,
+ weekmask: ArrayLike = ...,
+ holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ ) -> busdaycalendar: ...
+ @property
+ def weekmask(self) -> NDArray[np.bool]: ...
+ @property
+ def holidays(self) -> NDArray[datetime64]: ...
+
+class finfo(Generic[_FloatingT_co]):
+ dtype: Final[dtype[_FloatingT_co]]
+ bits: Final[int]
+ eps: Final[_FloatingT_co]
+ epsneg: Final[_FloatingT_co]
+ iexp: Final[int]
+ machep: Final[int]
+ max: Final[_FloatingT_co]
+ maxexp: Final[int]
+ min: Final[_FloatingT_co]
+ minexp: Final[int]
+ negep: Final[int]
+ nexp: Final[int]
+ nmant: Final[int]
+ precision: Final[int]
+ resolution: Final[_FloatingT_co]
+ smallest_subnormal: Final[_FloatingT_co]
+ @property
+ def smallest_normal(self) -> _FloatingT_co: ...
+ @property
+ def tiny(self) -> _FloatingT_co: ...
+ @overload
+ def __new__(cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]]) -> finfo[floating[_NBit1]]: ...
+ @overload
+ def __new__(cls, dtype: complex | type[complex]) -> finfo[float64]: ...
+ @overload
+ def __new__(cls, dtype: str) -> finfo[floating]: ...
+
+class iinfo(Generic[_IntegerT_co]):
+ dtype: Final[dtype[_IntegerT_co]]
+ kind: Final[LiteralString]
+ bits: Final[int]
+ key: Final[LiteralString]
+ @property
+ def min(self) -> int: ...
+ @property
+ def max(self) -> int: ...
+
+ @overload
+ def __new__(
+ cls, dtype: _IntegerT_co | _DTypeLike[_IntegerT_co]
+ ) -> iinfo[_IntegerT_co]: ...
+ @overload
+ def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ...
+ @overload
+ def __new__(cls, dtype: str) -> iinfo[Any]: ...
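+
+# Illustrative sketch: `finfo` and `iinfo` expose machine limits per dtype:
+#
+#     assert np.iinfo(np.int32).max == 2**31 - 1
+#     assert np.finfo(np.float64).bits == 64
+#     eps = np.finfo(np.float64).eps  # ~2.22e-16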
+
+@final
+class nditer:
+ def __new__(
+ cls,
+ op: ArrayLike | Sequence[ArrayLike | None],
+ flags: Sequence[_NDIterFlagsKind] | None = ...,
+ op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ...,
+ op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ op_axes: Sequence[Sequence[SupportsIndex]] | None = ...,
+ itershape: _ShapeLike | None = ...,
+ buffersize: SupportsIndex = ...,
+ ) -> nditer: ...
+ def __enter__(self) -> nditer: ...
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None: ...
+ def __iter__(self) -> nditer: ...
+ def __next__(self) -> tuple[NDArray[Any], ...]: ...
+ def __len__(self) -> int: ...
+ def __copy__(self) -> nditer: ...
+ @overload
+ def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ...
+ @overload
+ def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ...
+ def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ...
+ def close(self) -> None: ...
+ def copy(self) -> nditer: ...
+ def debug_print(self) -> None: ...
+ def enable_external_loop(self) -> None: ...
+ def iternext(self) -> builtins.bool: ...
+ def remove_axis(self, i: SupportsIndex, /) -> None: ...
+ def remove_multi_index(self) -> None: ...
+ def reset(self) -> None: ...
+ @property
+ def dtypes(self) -> tuple[dtype, ...]: ...
+ @property
+ def finished(self) -> builtins.bool: ...
+ @property
+ def has_delayed_bufalloc(self) -> builtins.bool: ...
+ @property
+ def has_index(self) -> builtins.bool: ...
+ @property
+ def has_multi_index(self) -> builtins.bool: ...
+ @property
+ def index(self) -> int: ...
+ @property
+ def iterationneedsapi(self) -> builtins.bool: ...
+ @property
+ def iterindex(self) -> int: ...
+ @property
+ def iterrange(self) -> tuple[int, ...]: ...
+ @property
+ def itersize(self) -> int: ...
+ @property
+ def itviews(self) -> tuple[NDArray[Any], ...]: ...
+ @property
+ def multi_index(self) -> tuple[int, ...]: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def nop(self) -> int: ...
+ @property
+ def operands(self) -> tuple[NDArray[Any], ...]: ...
+ @property
+ def shape(self) -> tuple[int, ...]: ...
+ @property
+ def value(self) -> tuple[NDArray[Any], ...]: ...
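+
+# Illustrative sketch: element-wise iteration with a tracked multi-index
+# (the full flag reference lives in the `nditer` docstring in
+# numpy/_core/_add_newdocs.py):
+#
+#     a = np.arange(6).reshape(2, 3)
+#     with np.nditer(a, flags=["multi_index"]) as it:
+#         for x in it:
+#             print(it.multi_index, int(x))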
+
+class memmap(ndarray[_ShapeT_co, _DTypeT_co]):
+ __array_priority__: ClassVar[float]
+ filename: str | None
+ offset: int
+ mode: str
+ @overload
+ def __new__(
+ subtype,
+ filename: StrOrBytesPath | _SupportsFileMethodsRW,
+ dtype: type[uint8] = ...,
+ mode: _MemMapModeKind = ...,
+ offset: int = ...,
+ shape: int | tuple[int, ...] | None = ...,
+ order: _OrderKACF = ...,
+ ) -> memmap[Any, dtype[uint8]]: ...
+ @overload
+ def __new__(
+ subtype,
+ filename: StrOrBytesPath | _SupportsFileMethodsRW,
+ dtype: _DTypeLike[_ScalarT],
+ mode: _MemMapModeKind = ...,
+ offset: int = ...,
+ shape: int | tuple[int, ...] | None = ...,
+ order: _OrderKACF = ...,
+ ) -> memmap[Any, dtype[_ScalarT]]: ...
+ @overload
+ def __new__(
+ subtype,
+ filename: StrOrBytesPath | _SupportsFileMethodsRW,
+ dtype: DTypeLike,
+ mode: _MemMapModeKind = ...,
+ offset: int = ...,
+ shape: int | tuple[int, ...] | None = ...,
+ order: _OrderKACF = ...,
+ ) -> memmap[Any, dtype]: ...
+ def __array_finalize__(self, obj: object) -> None: ...
+ def __array_wrap__(
+ self,
+ array: memmap[_ShapeT_co, _DTypeT_co],
+ context: tuple[ufunc, tuple[Any, ...], int] | None = ...,
+ return_scalar: builtins.bool = ...,
+ ) -> Any: ...
+ def flush(self) -> None: ...
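+
+# Illustrative sketch (hypothetical path): write an array through a memory
+# map and force it out to disk:
+#
+#     mm = np.memmap("/tmp/demo.dat", dtype=np.float32, mode="w+", shape=(4,))
+#     mm[:] = [1.0, 2.0, 3.0, 4.0]
+#     mm.flush()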
+
+# TODO: Add a mypy plugin for managing functions whose output type is dependent
+# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`)
+class vectorize:
+ pyfunc: Callable[..., Any]
+ cache: builtins.bool
+ signature: LiteralString | None
+ otypes: LiteralString | None
+ excluded: set[int | str]
+ __doc__: str | None
+ def __init__(
+ self,
+ pyfunc: Callable[..., Any],
+ otypes: str | Iterable[DTypeLike] | None = ...,
+ doc: str | None = ...,
+ excluded: Iterable[int | str] | None = ...,
+ cache: builtins.bool = ...,
+ signature: str | None = ...,
+ ) -> None: ...
+ def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
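+
+# Illustrative sketch: `vectorize` broadcasts a scalar Python function over
+# array arguments (a convenience wrapper, essentially a for loop, not a
+# performance optimization):
+#
+#     @np.vectorize
+#     def positive_diff(a, b):
+#         return a - b if a > b else 0
+#
+#     positive_diff(np.array([1, 5, 3]), 2)  # array([0, 3, 1])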
+
+class poly1d:
+ @property
+ def variable(self) -> LiteralString: ...
+ @property
+ def order(self) -> int: ...
+ @property
+ def o(self) -> int: ...
+ @property
+ def roots(self) -> NDArray[Any]: ...
+ @property
+ def r(self) -> NDArray[Any]: ...
+
+ @property
+ def coeffs(self) -> NDArray[Any]: ...
+ @coeffs.setter
+ def coeffs(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def c(self) -> NDArray[Any]: ...
+ @c.setter
+ def c(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def coef(self) -> NDArray[Any]: ...
+ @coef.setter
+ def coef(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def coefficients(self) -> NDArray[Any]: ...
+ @coefficients.setter
+ def coefficients(self, value: NDArray[Any]) -> None: ...
+
+ __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride]
+
+ @overload
+ def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ...
+ @overload
+ def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ...
+
+ @overload
+ def __call__(self, val: _ScalarLike_co) -> Any: ...
+ @overload
+ def __call__(self, val: poly1d) -> poly1d: ...
+ @overload
+ def __call__(self, val: ArrayLike) -> NDArray[Any]: ...
+
+ def __init__(
+ self,
+ c_or_r: ArrayLike,
+ r: builtins.bool = ...,
+ variable: str | None = ...,
+ ) -> None: ...
+ def __len__(self) -> int: ...
+ def __neg__(self) -> poly1d: ...
+ def __pos__(self) -> poly1d: ...
+ def __mul__(self, other: ArrayLike, /) -> poly1d: ...
+ def __rmul__(self, other: ArrayLike, /) -> poly1d: ...
+ def __add__(self, other: ArrayLike, /) -> poly1d: ...
+ def __radd__(self, other: ArrayLike, /) -> poly1d: ...
+ def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted
+ def __sub__(self, other: ArrayLike, /) -> poly1d: ...
+ def __rsub__(self, other: ArrayLike, /) -> poly1d: ...
+ def __truediv__(self, other: ArrayLike, /) -> poly1d: ...
+ def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ...
+ def __getitem__(self, val: int, /) -> Any: ...
+ def __setitem__(self, key: int, val: Any, /) -> None: ...
+ def __iter__(self) -> Iterator[Any]: ...
+ def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ...
+ def integ(
+ self,
+ m: SupportsInt | SupportsIndex = ...,
+ k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ...,
+ ) -> poly1d: ...
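+
+# Illustrative sketch: `poly1d([1, -3, 2])` represents x**2 - 3*x + 2 and
+# supports evaluation, root finding, and calculus:
+#
+#     p = np.poly1d([1, -3, 2])
+#     assert p(2) == 0
+#     assert sorted(p.roots) == [1.0, 2.0]
+#     assert p.deriv()(1) == -1  # derivative 2*x - 3 at x = 1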
+
+class matrix(ndarray[_2DShapeT_co, _DTypeT_co]):
+ __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride]
+
+ def __new__(
+ subtype, # pyright: ignore[reportSelfClsParameterName]
+ data: ArrayLike,
+ dtype: DTypeLike = ...,
+ copy: builtins.bool = ...,
+ ) -> matrix[_2D, Incomplete]: ...
+ def __array_finalize__(self, obj: object) -> None: ...
+
+ @overload # type: ignore[override]
+ def __getitem__(
+ self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], /
+ ) -> Incomplete: ...
+ @overload
+ def __getitem__(self, key: _ToIndices, /) -> matrix[_2D, _DTypeT_co]: ...
+ @overload
+ def __getitem__(self: matrix[Any, dtype[void]], key: str, /) -> matrix[_2D, dtype]: ...
+ @overload
+ def __getitem__(self: matrix[Any, dtype[void]], key: list[str], /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ #
+ def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+ def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+ def __imul__(self, other: ArrayLike, /) -> Self: ...
+
+ #
+ def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+ def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+ def __ipow__(self, other: ArrayLike, /) -> Self: ... # type: ignore[misc, override]
+
+ # keep in sync with `prod` and `mean`
+ @overload # type: ignore[override]
+ def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+ @overload
+ def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ...
+ @overload
+ def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `sum` and `mean`
+ @overload # type: ignore[override]
+ def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+ @overload
+ def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ...
+ @overload
+ def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `sum` and `prod`
+ @overload # type: ignore[override]
+ def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ...
+ @overload
+ def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ...
+ @overload
+ def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `var`
+ @overload # type: ignore[override]
+ def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ...
+ @overload
+ def std(
+ self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0
+ ) -> matrix[_2D, Incomplete]: ...
+ @overload
+ def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ...
+ @overload
+ def std( # pyright: ignore[reportIncompatibleMethodOverride]
+ self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0
+ ) -> _ArrayT: ...
+
+ # keep in sync with `std`
+ @overload # type: ignore[override]
+ def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ...
+ @overload
+ def var(
+ self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0
+ ) -> matrix[_2D, Incomplete]: ...
+ @overload
+ def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ...
+ @overload
+ def var( # pyright: ignore[reportIncompatibleMethodOverride]
+ self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0
+ ) -> _ArrayT: ...
+
+ # keep in sync with `all`
+ @overload # type: ignore[override]
+ def any(self, axis: None = None, out: None = None) -> np.bool: ...
+ @overload
+ def any(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ...
+ @overload
+ def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `any`
+ @overload # type: ignore[override]
+ def all(self, axis: None = None, out: None = None) -> np.bool: ...
+ @overload
+ def all(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ...
+ @overload
+ def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `min` and `ptp`
+ @overload # type: ignore[override]
+ def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
+ @overload
+ def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+ @overload
+ def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `max` and `ptp`
+ @overload # type: ignore[override]
+ def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
+ @overload
+ def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+ @overload
+ def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `max` and `min`
+ @overload
+ def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ...
+ @overload
+ def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ...
+ @overload
+ def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ...
+ @overload
+ def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `argmin`
+ @overload # type: ignore[override]
+ def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ...
+ @overload
+ def argmax(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ...
+ @overload
+ def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...
+ @overload
+ def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # keep in sync with `argmax`
+ @overload # type: ignore[override]
+ def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ...
+ @overload
+ def argmin(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ...
+ @overload
+ def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ...
+ @overload
+ def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # the second overload handles the (rare) case that the matrix is not 2-d
+ @overload
+ def tolist(self: matrix[_2D, dtype[generic[_T]]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride]
+ @overload
+ def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # these three methods will at least return a `2-d` array of shape (1, n)
+ def squeeze(self, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ...
+ def ravel(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+ def flatten(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+
+ # matrix.T is inherited from _ScalarOrArrayCommon
+ def getT(self) -> Self: ...
+ @property
+ def I(self) -> matrix[_2D, Incomplete]: ... # noqa: E743
+ def getI(self) -> matrix[_2D, Incomplete]: ...
+ @property
+ def A(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ...
+ def getA(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ...
+ @property
+ def A1(self) -> ndarray[_AnyShape, _DTypeT_co]: ...
+ def getA1(self) -> ndarray[_AnyShape, _DTypeT_co]: ...
+ @property
+ def H(self) -> matrix[_2D, _DTypeT_co]: ...
+ def getH(self) -> matrix[_2D, _DTypeT_co]: ...
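+
+# Illustrative sketch (`np.matrix` is legacy; plain ndarrays with `@` are
+# preferred): `*` means matrix multiplication here and `.I` is the inverse:
+#
+#     m = np.matrix([[1.0, 2.0], [3.0, 4.0]])
+#     assert np.allclose(m * m.I, np.eye(2))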
+
+def from_dlpack(
+ x: _SupportsDLPack[None],
+ /,
+ *,
+ device: L["cpu"] | None = None,
+ copy: builtins.bool | None = None,
+) -> NDArray[number | np.bool]: ...
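+
+# Illustrative sketch: any object implementing the DLPack protocol
+# (a NumPy array included) can be imported, zero-copy where possible:
+#
+#     a = np.arange(3.0)
+#     b = np.from_dlpack(a)
+#     b[0] = 99.0  # visible through `a`: the buffers are shared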
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/__config__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/__config__.cpython-312.pyc
new file mode 100644
index 00000000..3923032f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/__config__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..0cc8c247
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_array_api_info.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_array_api_info.cpython-312.pyc
new file mode 100644
index 00000000..ba1dbe82
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_array_api_info.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_configtool.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_configtool.cpython-312.pyc
new file mode 100644
index 00000000..b18ebff0
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_configtool.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_distributor_init.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_distributor_init.cpython-312.pyc
new file mode 100644
index 00000000..d296ca98
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_distributor_init.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc
new file mode 100644
index 00000000..35e891e6
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_globals.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_globals.cpython-312.pyc
new file mode 100644
index 00000000..a9788027
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_globals.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/_pytesttester.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_pytesttester.cpython-312.pyc
new file mode 100644
index 00000000..93a18a01
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/_pytesttester.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/conftest.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/conftest.cpython-312.pyc
new file mode 100644
index 00000000..978440a1
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/conftest.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/dtypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/dtypes.cpython-312.pyc
new file mode 100644
index 00000000..04ada8db
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/dtypes.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/exceptions.cpython-312.pyc
new file mode 100644
index 00000000..36313b2e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/exceptions.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/matlib.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/matlib.cpython-312.pyc
new file mode 100644
index 00000000..885a48b8
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/matlib.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/__pycache__/version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/__pycache__/version.cpython-312.pyc
new file mode 100644
index 00000000..1a906605
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/__pycache__/version.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_array_api_info.py b/.venv/lib/python3.12/site-packages/numpy/_array_api_info.py
new file mode 100644
index 00000000..067e3879
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_array_api_info.py
@@ -0,0 +1,346 @@
+"""
+Array API Inspection namespace
+
+This is the namespace for inspection functions as defined by the array API
+standard. See
+https://data-apis.org/array-api/latest/API_specification/inspection.html for
+more details.
+
+"""
+from numpy._core import (
+ bool,
+ complex64,
+ complex128,
+ dtype,
+ float32,
+ float64,
+ int8,
+ int16,
+ int32,
+ int64,
+ intp,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+
+
+class __array_namespace_info__:
+ """
+ Get the array API inspection namespace for NumPy.
+
+ The array API inspection namespace defines the following functions:
+
+ - capabilities()
+ - default_device()
+ - default_dtypes()
+ - dtypes()
+ - devices()
+
+ See
+ https://data-apis.org/array-api/latest/API_specification/inspection.html
+ for more details.
+
+ Returns
+ -------
+ info : ModuleType
+ The array API inspection namespace for NumPy.
+
+ Examples
+ --------
+ >>> info = np.__array_namespace_info__()
+ >>> info.default_dtypes()
+ {'real floating': numpy.float64,
+ 'complex floating': numpy.complex128,
+ 'integral': numpy.int64,
+ 'indexing': numpy.int64}
+
+ """
+
+ __module__ = 'numpy'
+
+ def capabilities(self):
+ """
+ Return a dictionary of array API library capabilities.
+
+ The resulting dictionary has the following keys:
+
+ - **"boolean indexing"**: boolean indicating whether an array library
+ supports boolean indexing. Always ``True`` for NumPy.
+
+ - **"data-dependent shapes"**: boolean indicating whether an array
+ library supports data-dependent output shapes. Always ``True`` for
+ NumPy.
+
+ - **"max dimensions"**: integer giving the maximum number of array
+ dimensions the library supports. Always ``64`` for NumPy.
+
+ See
+ https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
+ for more details.
+
+ See Also
+ --------
+ __array_namespace_info__.default_device,
+ __array_namespace_info__.default_dtypes,
+ __array_namespace_info__.dtypes,
+ __array_namespace_info__.devices
+
+ Returns
+ -------
+ capabilities : dict
+ A dictionary of array API library capabilities.
+
+ Examples
+ --------
+ >>> info = np.__array_namespace_info__()
+ >>> info.capabilities()
+ {'boolean indexing': True,
+ 'data-dependent shapes': True,
+ 'max dimensions': 64}
+
+ """
+ return {
+ "boolean indexing": True,
+ "data-dependent shapes": True,
+ "max dimensions": 64,
+ }
+
+ def default_device(self):
+ """
+ The default device used for new NumPy arrays.
+
+ For NumPy, this always returns ``'cpu'``.
+
+ See Also
+ --------
+ __array_namespace_info__.capabilities,
+ __array_namespace_info__.default_dtypes,
+ __array_namespace_info__.dtypes,
+ __array_namespace_info__.devices
+
+ Returns
+ -------
+ device : str
+ The default device used for new NumPy arrays.
+
+ Examples
+ --------
+ >>> info = np.__array_namespace_info__()
+ >>> info.default_device()
+ 'cpu'
+
+ """
+ return "cpu"
+
+ def default_dtypes(self, *, device=None):
+ """
+ The default data types used for new NumPy arrays.
+
+ For NumPy, this always returns the following dictionary:
+
+ - **"real floating"**: ``numpy.float64``
+ - **"complex floating"**: ``numpy.complex128``
+ - **"integral"**: ``numpy.intp``
+ - **"indexing"**: ``numpy.intp``
+
+ Parameters
+ ----------
+ device : str, optional
+ The device to get the default data types for. For NumPy, only
+ ``'cpu'`` is allowed.
+
+ Returns
+ -------
+ dtypes : dict
+ A dictionary describing the default data types used for new NumPy
+ arrays.
+
+ See Also
+ --------
+ __array_namespace_info__.capabilities,
+ __array_namespace_info__.default_device,
+ __array_namespace_info__.dtypes,
+ __array_namespace_info__.devices
+
+ Examples
+ --------
+ >>> info = np.__array_namespace_info__()
+ >>> info.default_dtypes()
+ {'real floating': numpy.float64,
+ 'complex floating': numpy.complex128,
+ 'integral': numpy.int64,
+ 'indexing': numpy.int64}
+
+ """
+ if device not in ["cpu", None]:
+ raise ValueError(
+ 'Device not understood. Only "cpu" is allowed, but received:'
+ f' {device}'
+ )
+ return {
+ "real floating": dtype(float64),
+ "complex floating": dtype(complex128),
+ "integral": dtype(intp),
+ "indexing": dtype(intp),
+ }
+
+ def dtypes(self, *, device=None, kind=None):
+ """
+ The array API data types supported by NumPy.
+
+ Note that this function only returns data types that are defined by
+ the array API.
+
+ Parameters
+ ----------
+ device : str, optional
+ The device to get the data types for. For NumPy, only ``'cpu'`` is
+ allowed.
+ kind : str or tuple of str, optional
+ The kind of data types to return. If ``None``, all data types are
+ returned. If a string, only data types of that kind are returned.
+ If a tuple, a dictionary containing the union of the given kinds
+ is returned. The following kinds are supported:
+
+ - ``'bool'``: boolean data types (i.e., ``bool``).
+ - ``'signed integer'``: signed integer data types (i.e., ``int8``,
+ ``int16``, ``int32``, ``int64``).
+ - ``'unsigned integer'``: unsigned integer data types (i.e.,
+ ``uint8``, ``uint16``, ``uint32``, ``uint64``).
+ - ``'integral'``: integer data types. Shorthand for ``('signed
+ integer', 'unsigned integer')``.
+ - ``'real floating'``: real-valued floating-point data types
+ (i.e., ``float32``, ``float64``).
+ - ``'complex floating'``: complex floating-point data types (i.e.,
+ ``complex64``, ``complex128``).
+ - ``'numeric'``: numeric data types. Shorthand for ``('integral',
+ 'real floating', 'complex floating')``.
+
+ Returns
+ -------
+ dtypes : dict
+ A dictionary mapping the names of data types to the corresponding
+ NumPy data types.
+
+ See Also
+ --------
+ __array_namespace_info__.capabilities,
+ __array_namespace_info__.default_device,
+ __array_namespace_info__.default_dtypes,
+ __array_namespace_info__.devices
+
+ Examples
+ --------
+ >>> info = np.__array_namespace_info__()
+ >>> info.dtypes(kind='signed integer')
+ {'int8': numpy.int8,
+ 'int16': numpy.int16,
+ 'int32': numpy.int32,
+ 'int64': numpy.int64}
+
+ """
+ if device not in ["cpu", None]:
+ raise ValueError(
+ 'Device not understood. Only "cpu" is allowed, but received:'
+ f' {device}'
+ )
+ if kind is None:
+ return {
+ "bool": dtype(bool),
+ "int8": dtype(int8),
+ "int16": dtype(int16),
+ "int32": dtype(int32),
+ "int64": dtype(int64),
+ "uint8": dtype(uint8),
+ "uint16": dtype(uint16),
+ "uint32": dtype(uint32),
+ "uint64": dtype(uint64),
+ "float32": dtype(float32),
+ "float64": dtype(float64),
+ "complex64": dtype(complex64),
+ "complex128": dtype(complex128),
+ }
+ if kind == "bool":
+ return {"bool": bool}
+ if kind == "signed integer":
+ return {
+ "int8": dtype(int8),
+ "int16": dtype(int16),
+ "int32": dtype(int32),
+ "int64": dtype(int64),
+ }
+ if kind == "unsigned integer":
+ return {
+ "uint8": dtype(uint8),
+ "uint16": dtype(uint16),
+ "uint32": dtype(uint32),
+ "uint64": dtype(uint64),
+ }
+ if kind == "integral":
+ return {
+ "int8": dtype(int8),
+ "int16": dtype(int16),
+ "int32": dtype(int32),
+ "int64": dtype(int64),
+ "uint8": dtype(uint8),
+ "uint16": dtype(uint16),
+ "uint32": dtype(uint32),
+ "uint64": dtype(uint64),
+ }
+ if kind == "real floating":
+ return {
+ "float32": dtype(float32),
+ "float64": dtype(float64),
+ }
+ if kind == "complex floating":
+ return {
+ "complex64": dtype(complex64),
+ "complex128": dtype(complex128),
+ }
+ if kind == "numeric":
+ return {
+ "int8": dtype(int8),
+ "int16": dtype(int16),
+ "int32": dtype(int32),
+ "int64": dtype(int64),
+ "uint8": dtype(uint8),
+ "uint16": dtype(uint16),
+ "uint32": dtype(uint32),
+ "uint64": dtype(uint64),
+ "float32": dtype(float32),
+ "float64": dtype(float64),
+ "complex64": dtype(complex64),
+ "complex128": dtype(complex128),
+ }
+ if isinstance(kind, tuple):
+ res = {}
+ for k in kind:
+ res.update(self.dtypes(kind=k))
+ return res
+ raise ValueError(f"unsupported kind: {kind!r}")
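+
+ # Usage sketch: a tuple of kinds yields the union of the per-kind
+ # dictionaries, e.g.
+ #
+ # >>> info = np.__array_namespace_info__()
+ # >>> sorted(info.dtypes(kind=("bool", "real floating")))
+ # ['bool', 'float32', 'float64']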
+
+ def devices(self):
+ """
+ The devices supported by NumPy.
+
+ For NumPy, this always returns ``['cpu']``.
+
+ Returns
+ -------
+ devices : list of str
+ The devices supported by NumPy.
+
+ See Also
+ --------
+ __array_namespace_info__.capabilities,
+ __array_namespace_info__.default_device,
+ __array_namespace_info__.default_dtypes,
+ __array_namespace_info__.dtypes
+
+ Examples
+ --------
+ >>> info = np.__array_namespace_info__()
+ >>> info.devices()
+ ['cpu']
+
+ """
+ return ["cpu"]
diff --git a/.venv/lib/python3.12/site-packages/numpy/_array_api_info.pyi b/.venv/lib/python3.12/site-packages/numpy/_array_api_info.pyi
new file mode 100644
index 00000000..ee9f8a56
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_array_api_info.pyi
@@ -0,0 +1,207 @@
+from typing import (
+ ClassVar,
+ Literal,
+ Never,
+ TypeAlias,
+ TypedDict,
+ TypeVar,
+ final,
+ overload,
+ type_check_only,
+)
+
+import numpy as np
+
+_Device: TypeAlias = Literal["cpu"]
+_DeviceLike: TypeAlias = _Device | None
+
+_Capabilities = TypedDict(
+ "_Capabilities",
+ {
+ "boolean indexing": Literal[True],
+ "data-dependent shapes": Literal[True],
+ },
+)
+
+_DefaultDTypes = TypedDict(
+ "_DefaultDTypes",
+ {
+ "real floating": np.dtype[np.float64],
+ "complex floating": np.dtype[np.complex128],
+ "integral": np.dtype[np.intp],
+ "indexing": np.dtype[np.intp],
+ },
+)
+
+_KindBool: TypeAlias = Literal["bool"]
+_KindInt: TypeAlias = Literal["signed integer"]
+_KindUInt: TypeAlias = Literal["unsigned integer"]
+_KindInteger: TypeAlias = Literal["integral"]
+_KindFloat: TypeAlias = Literal["real floating"]
+_KindComplex: TypeAlias = Literal["complex floating"]
+_KindNumber: TypeAlias = Literal["numeric"]
+_Kind: TypeAlias = (
+ _KindBool
+ | _KindInt
+ | _KindUInt
+ | _KindInteger
+ | _KindFloat
+ | _KindComplex
+ | _KindNumber
+)
+
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_T3 = TypeVar("_T3")
+_Permute1: TypeAlias = _T1 | tuple[_T1]
+_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1]
+_Permute3: TypeAlias = (
+ tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2]
+ | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1]
+ | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1]
+)
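+
+# Note: the `_Permute*` aliases spell out every ordering of a tuple of
+# kinds, e.g. `_Permute2[_KindInt, _KindUInt]` expands to
+# `tuple[_KindInt, _KindUInt] | tuple[_KindUInt, _KindInt]`, which lets the
+# `dtypes()` overloads below accept kind tuples in any order.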
+
+@type_check_only
+class _DTypesBool(TypedDict):
+ bool: np.dtype[np.bool]
+
+@type_check_only
+class _DTypesInt(TypedDict):
+ int8: np.dtype[np.int8]
+ int16: np.dtype[np.int16]
+ int32: np.dtype[np.int32]
+ int64: np.dtype[np.int64]
+
+@type_check_only
+class _DTypesUInt(TypedDict):
+ uint8: np.dtype[np.uint8]
+ uint16: np.dtype[np.uint16]
+ uint32: np.dtype[np.uint32]
+ uint64: np.dtype[np.uint64]
+
+@type_check_only
+class _DTypesInteger(_DTypesInt, _DTypesUInt): ...
+
+@type_check_only
+class _DTypesFloat(TypedDict):
+ float32: np.dtype[np.float32]
+ float64: np.dtype[np.float64]
+
+@type_check_only
+class _DTypesComplex(TypedDict):
+ complex64: np.dtype[np.complex64]
+ complex128: np.dtype[np.complex128]
+
+@type_check_only
+class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ...
+
+@type_check_only
+class _DTypes(_DTypesBool, _DTypesNumber): ...
+
+@type_check_only
+class _DTypesUnion(TypedDict, total=False):
+ bool: np.dtype[np.bool]
+ int8: np.dtype[np.int8]
+ int16: np.dtype[np.int16]
+ int32: np.dtype[np.int32]
+ int64: np.dtype[np.int64]
+ uint8: np.dtype[np.uint8]
+ uint16: np.dtype[np.uint16]
+ uint32: np.dtype[np.uint32]
+ uint64: np.dtype[np.uint64]
+ float32: np.dtype[np.float32]
+ float64: np.dtype[np.float64]
+ complex64: np.dtype[np.complex64]
+ complex128: np.dtype[np.complex128]
+
+_EmptyDict: TypeAlias = dict[Never, Never]
+
+@final
+class __array_namespace_info__:
+ __module__: ClassVar[Literal['numpy']]
+
+ def capabilities(self) -> _Capabilities: ...
+ def default_device(self) -> _Device: ...
+ def default_dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ ) -> _DefaultDTypes: ...
+ def devices(self) -> list[_Device]: ...
+
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: None = ...,
+ ) -> _DTypes: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: _Permute1[_KindBool],
+ ) -> _DTypesBool: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: _Permute1[_KindInt],
+ ) -> _DTypesInt: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: _Permute1[_KindUInt],
+ ) -> _DTypesUInt: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: _Permute1[_KindFloat],
+ ) -> _DTypesFloat: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: _Permute1[_KindComplex],
+ ) -> _DTypesComplex: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: (
+ _Permute1[_KindInteger]
+ | _Permute2[_KindInt, _KindUInt]
+ ),
+ ) -> _DTypesInteger: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: (
+ _Permute1[_KindNumber]
+ | _Permute3[_KindInteger, _KindFloat, _KindComplex]
+ ),
+ ) -> _DTypesNumber: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: tuple[()],
+ ) -> _EmptyDict: ...
+ @overload
+ def dtypes(
+ self,
+ *,
+ device: _DeviceLike = ...,
+ kind: tuple[_Kind, ...],
+ ) -> _DTypesUnion: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_configtool.py b/.venv/lib/python3.12/site-packages/numpy/_configtool.py
new file mode 100644
index 00000000..db7831c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_configtool.py
@@ -0,0 +1,39 @@
+import argparse
+import sys
+from pathlib import Path
+
+from .lib._utils_impl import get_include
+from .version import __version__
+
+
+def main() -> None:
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--version",
+ action="version",
+ version=__version__,
+ help="Print the version and exit.",
+ )
+ parser.add_argument(
+ "--cflags",
+ action="store_true",
+ help="Compile flag needed when using the NumPy headers.",
+ )
+ parser.add_argument(
+ "--pkgconfigdir",
+ action="store_true",
+ help=("Print the pkgconfig directory in which `numpy.pc` is stored "
+ "(useful for setting $PKG_CONFIG_PATH)."),
+ )
+ args = parser.parse_args()
+ if not sys.argv[1:]:
+ parser.print_help()
+ if args.cflags:
+ print("-I" + get_include())
+ if args.pkgconfigdir:
+ _path = Path(get_include()) / '..' / 'lib' / 'pkgconfig'
+ print(_path.resolve())
+
+
+if __name__ == "__main__":
+ main()
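+
+# Example invocations (output paths are illustrative and depend on where
+# NumPy is installed):
+#
+#     $ python -m numpy._configtool --cflags
+#     -I/.../site-packages/numpy/_core/include
+#     $ python -m numpy._configtool --pkgconfigdir
+#     /.../site-packages/numpy/_core/lib/pkgconfig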
diff --git a/.venv/lib/python3.12/site-packages/numpy/_configtool.pyi b/.venv/lib/python3.12/site-packages/numpy/_configtool.pyi
new file mode 100644
index 00000000..7e7363e7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_configtool.pyi
@@ -0,0 +1 @@
+def main() -> None: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__init__.py b/.venv/lib/python3.12/site-packages/numpy/_core/__init__.py
new file mode 100644
index 00000000..d0da7e0a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/__init__.py
@@ -0,0 +1,186 @@
+"""
+Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
+
+Please note that this module is private. All functions and objects
+are available in the main ``numpy`` namespace - use that instead.
+
+"""
+
+import os
+
+from numpy.version import version as __version__
+
+# disables OpenBLAS affinity setting of the main thread that limits
+# python threads or processes to one core
+env_added = []
+for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
+ if envkey not in os.environ:
+ os.environ[envkey] = '1'
+ env_added.append(envkey)
+
+try:
+ from . import multiarray
+except ImportError as exc:
+ import sys
+ msg = """
+
+IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
+
+Importing the numpy C-extensions failed. This error can happen for
+many reasons, often due to issues with your setup or how NumPy was
+installed.
+
+We have compiled some common reasons and troubleshooting tips at:
+
+ https://numpy.org/devdocs/user/troubleshooting-importerror.html
+
+Please note and check the following:
+
+ * The Python version is: Python%d.%d from "%s"
+ * The NumPy version is: "%s"
+
+and make sure that they are the versions you expect.
+Please carefully study the documentation linked above for further help.
+
+Original error was: %s
+""" % (sys.version_info[0], sys.version_info[1], sys.executable,
+ __version__, exc)
+ raise ImportError(msg) from exc
+finally:
+ for envkey in env_added:
+ del os.environ[envkey]
+del envkey
+del env_added
+del os
+
+from . import umath
+
+# Check that multiarray,umath are pure python modules wrapping
+# _multiarray_umath and not either of the old c-extension modules
+if not (hasattr(multiarray, '_multiarray_umath') and
+ hasattr(umath, '_multiarray_umath')):
+ import sys
+ path = sys.modules['numpy'].__path__
+ msg = ("Something is wrong with the numpy installation. "
+ "While importing we detected an older version of "
+ "numpy in {}. One method of fixing this is to repeatedly uninstall "
+ "numpy until none is found, then reinstall this version.")
+ raise ImportError(msg.format(path))
+
+from . import numerictypes as nt
+from .numerictypes import sctypeDict, sctypes
+
+multiarray.set_typeDict(nt.sctypeDict)
+from . import (
+ _machar,
+ einsumfunc,
+ fromnumeric,
+ function_base,
+ getlimits,
+ numeric,
+ shape_base,
+)
+from .einsumfunc import *
+from .fromnumeric import *
+from .function_base import *
+from .getlimits import *
+
+# Note: module name memmap is overwritten by a class with same name
+from .memmap import *
+from .numeric import *
+from .records import recarray, record
+from .shape_base import *
+
+del nt
+
+# do this after everything else, to minimize the chance of this misleadingly
+# appearing in an import-time traceback
+# add these for module-freeze analysis (like PyInstaller)
+from . import (
+ _add_newdocs,
+ _add_newdocs_scalars,
+ _dtype,
+ _dtype_ctypes,
+ _internal,
+ _methods,
+)
+from .numeric import absolute as abs
+
+acos = numeric.arccos
+acosh = numeric.arccosh
+asin = numeric.arcsin
+asinh = numeric.arcsinh
+atan = numeric.arctan
+atanh = numeric.arctanh
+atan2 = numeric.arctan2
+concat = numeric.concatenate
+bitwise_left_shift = numeric.left_shift
+bitwise_invert = numeric.invert
+bitwise_right_shift = numeric.right_shift
+permute_dims = numeric.transpose
+pow = numeric.power
+
+__all__ = [
+ "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2",
+ "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat",
+ "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray"
+]
+__all__ += numeric.__all__
+__all__ += function_base.__all__
+__all__ += getlimits.__all__
+__all__ += shape_base.__all__
+__all__ += einsumfunc.__all__
+
+
+def _ufunc_reduce(func):
+ # Report the `__name__`; pickle will then look the object up by module
+ # and name. Note that pickle also accepts a `__qualname__` in place of
+ # this `__name__`. It may make sense to add a `__qualname__` to ufuncs,
+ # to allow this more explicitly (Numba has ufuncs as attributes).
+ # See also: https://github.com/dask/distributed/issues/3450
+ return func.__name__
+
+
+def _DType_reconstruct(scalar_type):
+ # This is a work-around to pickle type(np.dtype(np.float64)), etc.
+ # and it should eventually be replaced with a better solution, e.g. when
+ # DTypes become HeapTypes.
+ return type(dtype(scalar_type))
+
+
+def _DType_reduce(DType):
+ # As types/classes, most DTypes can simply be pickled by their name:
+ if not DType._legacy or DType.__module__ == "numpy.dtypes":
+ return DType.__name__
+
+ # However, user defined legacy dtypes (like rational) do not end up in
+ # `numpy.dtypes` as module and do not have a public class at all.
+ # For these, we pickle them by reconstructing them from the scalar type:
+ scalar_type = DType.type
+ return _DType_reconstruct, (scalar_type,)
+
+
+def __getattr__(name):
+ # Deprecated 2022-11-22, NumPy 1.25.
+ if name == "MachAr":
+ import warnings
+ warnings.warn(
+ "The `np._core.MachAr` is considered private API (NumPy 1.24)",
+ DeprecationWarning, stacklevel=2,
+ )
+ return _machar.MachAr
+ raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
+
+
+import copyreg
+
+copyreg.pickle(ufunc, _ufunc_reduce)
+copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)
+
+# Unclutter namespace (must keep _*_reconstruct for unpickling)
+del copyreg, _ufunc_reduce, _DType_reduce
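+
+# Illustrative consequence of the registrations above (in user code):
+# ufuncs pickle by name, so a round trip returns the identical object:
+#
+#     import pickle
+#     assert pickle.loads(pickle.dumps(np.add)) is np.add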
+
+from numpy._pytesttester import PytestTester
+
+test = PytestTester(__name__)
+del PytestTester
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/__init__.pyi
new file mode 100644
index 00000000..40d9c411
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/__init__.pyi
@@ -0,0 +1,2 @@
+# NOTE: The `np._core` namespace is deliberately kept empty due to it
+# being private
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 00000000..a9e957a2
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc
new file mode 100644
index 00000000..1ad890fd
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc
new file mode 100644
index 00000000..488d8543
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_asarray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_asarray.cpython-312.pyc
new file mode 100644
index 00000000..70c9a9d3
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_asarray.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype.cpython-312.pyc
new file mode 100644
index 00000000..683628f4
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc
new file mode 100644
index 00000000..9ab2dbd7
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_exceptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_exceptions.cpython-312.pyc
new file mode 100644
index 00000000..3832e4fc
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_exceptions.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_internal.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_internal.cpython-312.pyc
new file mode 100644
index 00000000..15c5df7a
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_internal.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_machar.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_machar.cpython-312.pyc
new file mode 100644
index 00000000..900e1f51
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_machar.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_methods.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_methods.cpython-312.pyc
new file mode 100644
index 00000000..f47eb09e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_methods.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc
new file mode 100644
index 00000000..f3595d0f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc
new file mode 100644
index 00000000..40287bca
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc
new file mode 100644
index 00000000..892a361f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/arrayprint.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/arrayprint.cpython-312.pyc
new file mode 100644
index 00000000..ca1b1479
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/arrayprint.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/cversions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/cversions.cpython-312.pyc
new file mode 100644
index 00000000..cd4fb0f7
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/cversions.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/defchararray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/defchararray.cpython-312.pyc
new file mode 100644
index 00000000..28558735
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/defchararray.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc
new file mode 100644
index 00000000..781a567c
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc
new file mode 100644
index 00000000..dd7e06ac
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/function_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/function_base.cpython-312.pyc
new file mode 100644
index 00000000..7d0147d6
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/function_base.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/getlimits.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/getlimits.cpython-312.pyc
new file mode 100644
index 00000000..24a5bce1
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/getlimits.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/memmap.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/memmap.cpython-312.pyc
new file mode 100644
index 00000000..f29c5da2
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/memmap.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/multiarray.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/multiarray.cpython-312.pyc
new file mode 100644
index 00000000..e2fb708f
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/multiarray.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numeric.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numeric.cpython-312.pyc
new file mode 100644
index 00000000..b31295d5
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numeric.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numerictypes.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numerictypes.cpython-312.pyc
new file mode 100644
index 00000000..0338e30c
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/numerictypes.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/overrides.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/overrides.cpython-312.pyc
new file mode 100644
index 00000000..e3fc62eb
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/overrides.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/printoptions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/printoptions.cpython-312.pyc
new file mode 100644
index 00000000..b3e30d41
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/printoptions.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/records.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/records.cpython-312.pyc
new file mode 100644
index 00000000..caa3b342
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/records.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/shape_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/shape_base.cpython-312.pyc
new file mode 100644
index 00000000..77d31848
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/shape_base.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/strings.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/strings.cpython-312.pyc
new file mode 100644
index 00000000..96bd1d32
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/strings.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/umath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/umath.cpython-312.pyc
new file mode 100644
index 00000000..fc0946ac
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/__pycache__/umath.cpython-312.pyc differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.py b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.py
new file mode 100644
index 00000000..8f5de4b7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.py
@@ -0,0 +1,6967 @@
+"""
+This is only meant to add docs to objects defined in C-extension modules.
+The purpose is to allow easier editing of the docstrings without
+requiring a re-compile.
+
+NOTE: Many of the methods of ndarray have corresponding functions.
+ If you update these docstrings, please keep also the ones in
+ _core/fromnumeric.py, matrixlib/defmatrix.py up-to-date.
+
+"""
+
+from numpy._core.function_base import add_newdoc
+from numpy._core.overrides import get_array_function_like_doc # noqa: F401
+
+###############################################################################
+#
+# flatiter
+#
+# flatiter needs a toplevel description
+#
+###############################################################################
+
+add_newdoc('numpy._core', 'flatiter',
+ """
+ Flat iterator object to iterate over arrays.
+
+ A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
+ It allows iterating over the array as if it were a 1-D array,
+ either in a for-loop or by calling its `next` method.
+
+ Iteration is done in row-major, C-style order (the last
+ index varying the fastest). The iterator can also be indexed using
+ basic slicing or advanced indexing.
+
+ See Also
+ --------
+ ndarray.flat : Return a flat iterator over an array.
+ ndarray.flatten : Returns a flattened copy of an array.
+
+ Notes
+ -----
+ A `flatiter` iterator can not be constructed directly from Python code
+ by calling the `flatiter` constructor.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> type(fl)
+ <class 'numpy.flatiter'>
+ >>> for item in fl:
+ ... print(item)
+ ...
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+
+ >>> fl[2:4]
+ array([2, 3])
+
+ """)
+
+# flatiter attributes
+
+add_newdoc('numpy._core', 'flatiter', ('base',
+ """
+ A reference to the array that is iterated over.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(5)
+ >>> fl = x.flat
+ >>> fl.base is x
+ True
+
+ """))
+
+
+add_newdoc('numpy._core', 'flatiter', ('coords',
+ """
+ An N-dimensional tuple of current coordinates.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> fl.coords
+ (0, 0)
+ >>> next(fl)
+ 0
+ >>> fl.coords
+ (0, 1)
+
+ """))
+
+
+add_newdoc('numpy._core', 'flatiter', ('index',
+ """
+ Current flat index into the array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> fl.index
+ 0
+ >>> next(fl)
+ 0
+ >>> fl.index
+ 1
+
+ """))
+
+# flatiter functions
+
+add_newdoc('numpy._core', 'flatiter', ('__array__',
+ """__array__(type=None) Get array from iterator
+
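+ Examples
+ --------
+ A small sketch (``np.asarray`` triggers ``__array__``; shown here for a
+ 2-by-3 input):
+
+ >>> import numpy as np
+ >>> fl = np.arange(6).reshape(2, 3).flat
+ >>> np.asarray(fl)
+ array([0, 1, 2, 3, 4, 5])
+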
+ """))
+
+
+add_newdoc('numpy._core', 'flatiter', ('copy',
+ """
+ copy()
+
+ Get a copy of the iterator as a 1-D array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> fl = x.flat
+ >>> fl.copy()
+ array([0, 1, 2, 3, 4, 5])
+
+ """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy._core', 'nditer',
+ """
+ nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K',
+ casting='safe', op_axes=None, itershape=None, buffersize=0)
+
+ Efficient multi-dimensional iterator object to iterate over arrays.
+ To get started using this object, see the
+ :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+ Parameters
+ ----------
+ op : ndarray or sequence of array_like
+ The array(s) to iterate over.
+
+ flags : sequence of str, optional
+ Flags to control the behavior of the iterator.
+
+ * ``buffered`` enables buffering when required.
+ * ``c_index`` causes a C-order index to be tracked.
+ * ``f_index`` causes a Fortran-order index to be tracked.
+ * ``multi_index`` causes a multi-index, or a tuple of indices
+ with one per iteration dimension, to be tracked.
+ * ``common_dtype`` causes all the operands to be converted to
+ a common data type, with copying or buffering as necessary.
+ * ``copy_if_overlap`` causes the iterator to determine if read
+ operands have overlap with write operands, and make temporary
+ copies as necessary to avoid overlap. False positives (needless
+ copying) are possible in some cases.
+ * ``delay_bufalloc`` delays allocation of the buffers until
+ a reset() call is made. Allows ``allocate`` operands to
+ be initialized before their values are copied into the buffers.
+ * ``external_loop`` causes the ``values`` given to be
+ one-dimensional arrays with multiple values instead of
+ zero-dimensional arrays.
+ * ``grow_inner`` allows the ``value`` array sizes to be made
+ larger than the buffer size when both ``buffered`` and
+ ``external_loop`` are used.
+ * ``ranged`` allows the iterator to be restricted to a sub-range
+ of the iterindex values.
+ * ``refs_ok`` enables iteration of reference types, such as
+ object arrays.
+ * ``reduce_ok`` enables iteration of ``readwrite`` operands
+ which are broadcasted, also known as reduction operands.
+ * ``zerosize_ok`` allows `itersize` to be zero.
+ op_flags : list of list of str, optional
+ This is a list of flags for each operand. At minimum, one of
+ ``readonly``, ``readwrite``, or ``writeonly`` must be specified.
+
+ * ``readonly`` indicates the operand will only be read from.
+ * ``readwrite`` indicates the operand will be read from and written to.
+ * ``writeonly`` indicates the operand will only be written to.
+ * ``no_broadcast`` prevents the operand from being broadcasted.
+ * ``contig`` forces the operand data to be contiguous.
+ * ``aligned`` forces the operand data to be aligned.
+ * ``nbo`` forces the operand data to be in native byte order.
+ * ``copy`` allows a temporary read-only copy if required.
+ * ``updateifcopy`` allows a temporary read-write copy if required.
+ * ``allocate`` causes the array to be allocated if it is None
+ in the ``op`` parameter.
+ * ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
+ * ``arraymask`` indicates that this operand is the mask to use
+ for selecting elements when writing to operands with the
+ 'writemasked' flag set. The iterator does not enforce this,
+ but when writing from a buffer back to the array, it only
+ copies those elements indicated by this mask.
+ * ``writemasked`` indicates that only elements where the chosen
+ ``arraymask`` operand is True will be written to.
+ * ``overlap_assume_elementwise`` can be used to mark operands that are
+ accessed only in the iterator order, to allow less conservative
+ copying when ``copy_if_overlap`` is present.
+ op_dtypes : dtype or tuple of dtype(s), optional
+ The required data type(s) of the operands. If copying or buffering
+ is enabled, the data will be converted to/from their original types.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the iteration order. 'C' means C order, 'F' means
+ Fortran order, 'A' means 'F' order if all the arrays are Fortran
+ contiguous, 'C' order otherwise, and 'K' means as close to the
+ order the array elements appear in memory as possible. This also
+ affects the element memory order of ``allocate`` operands, as they
+ are allocated to be compatible with iteration order.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when making a copy
+ or buffering. Setting this to 'unsafe' is not recommended,
+ as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ op_axes : list of list of ints, optional
+ If provided, a list of ints or None for each operand.
+ The list of axes for an operand is a mapping from the dimensions
+ of the iterator to the dimensions of the operand. A value of
+ -1 can be placed for entries, causing that dimension to be
+ treated as `newaxis`.
+ itershape : tuple of ints, optional
+ The desired shape of the iterator. This allows ``allocate`` operands
+ with a dimension mapped by op_axes not corresponding to a dimension
+ of a different operand to get a value not equal to 1 for that
+ dimension.
+ buffersize : int, optional
+ When buffering is enabled, controls the size of the temporary
+ buffers. Set to 0 for the default value.
+
+ Attributes
+ ----------
+ dtypes : tuple of dtype(s)
+ The data types of the values provided in `value`. This may be
+ different from the operand data types if buffering is enabled.
+ Valid only before the iterator is closed.
+ finished : bool
+ Whether the iteration over the operands is finished or not.
+ has_delayed_bufalloc : bool
+ If True, the iterator was created with the ``delay_bufalloc`` flag,
+ and no reset() function was called on it yet.
+ has_index : bool
+ If True, the iterator was created with either the ``c_index`` or
+ the ``f_index`` flag, and the property `index` can be used to
+ retrieve it.
+ has_multi_index : bool
+ If True, the iterator was created with the ``multi_index`` flag,
+ and the property `multi_index` can be used to retrieve it.
+ index
+ When the ``c_index`` or ``f_index`` flag was used, this property
+ provides access to the index. Raises a ValueError if accessed
+ and ``has_index`` is False.
+ iterationneedsapi : bool
+ Whether iteration requires access to the Python API, for example
+ if one of the operands is an object array.
+ iterindex : int
+ An index which matches the order of iteration.
+ itersize : int
+ Size of the iterator.
+ itviews
+ Structured view(s) of `operands` in memory, matching the reordered
+ and optimized iterator access pattern. Valid only before the iterator
+ is closed.
+ multi_index
+ When the ``multi_index`` flag was used, this property
+ provides access to the index. Raises a ValueError if accessed
+ and ``has_multi_index`` is False.
+ ndim : int
+ The dimensions of the iterator.
+ nop : int
+ The number of iterator operands.
+ operands : tuple of operand(s)
+ The array(s) to be iterated over. Valid only before the iterator is
+ closed.
+ shape : tuple of ints
+ Shape tuple, the shape of the iterator.
+ value
+ Value of ``operands`` at current iteration. Normally, this is a
+ tuple of array scalars, but if the flag ``external_loop`` is used,
+ it is a tuple of one dimensional arrays.
+
+ Notes
+ -----
+ `nditer` supersedes `flatiter`. The iterator implementation behind
+ `nditer` is also exposed by the NumPy C API.
+
+ The Python exposure supplies two iteration interfaces, one which follows
+ the Python iterator protocol, and another which mirrors the C-style
+ do-while pattern. The native Python approach is better in most cases, but
+ if you need the coordinates or index of an iterator, use the C-style pattern.
+
+ Examples
+ --------
+ Here is how we might write an ``iter_add`` function, using the
+ Python iterator protocol:
+
+ >>> import numpy as np
+
+ >>> def iter_add_py(x, y, out=None):
+ ... addop = np.add
+ ... it = np.nditer([x, y, out], [],
+ ... [['readonly'], ['readonly'], ['writeonly','allocate']])
+ ... with it:
+ ... for (a, b, c) in it:
+ ... addop(a, b, out=c)
+ ... return it.operands[2]
+
+ Here is the same function, but following the C-style pattern:
+
+ >>> def iter_add(x, y, out=None):
+ ... addop = np.add
+ ... it = np.nditer([x, y, out], [],
+ ... [['readonly'], ['readonly'], ['writeonly','allocate']])
+ ... with it:
+ ... while not it.finished:
+ ... addop(it[0], it[1], out=it[2])
+ ... it.iternext()
+ ... return it.operands[2]
+
+ Here is an example outer product function:
+
+ >>> def outer_it(x, y, out=None):
+ ... mulop = np.multiply
+ ... it = np.nditer([x, y, out], ['external_loop'],
+ ... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
+ ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
+ ... [-1] * x.ndim + list(range(y.ndim)),
+ ... None])
+ ... with it:
+ ... for (a, b, c) in it:
+ ... mulop(a, b, out=c)
+ ... return it.operands[2]
+
+ >>> a = np.arange(2)+1
+ >>> b = np.arange(3)+1
+ >>> outer_it(a,b)
+ array([[1, 2, 3],
+ [2, 4, 6]])
+
+ Here is an example function which operates like a "lambda" ufunc:
+
+ >>> def luf(lamdaexpr, *args, **kwargs):
+ ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
+ ... nargs = len(args)
+ ... op = (kwargs.get('out',None),) + args
+ ... it = np.nditer(op, ['buffered','external_loop'],
+ ... [['writeonly','allocate','no_broadcast']] +
+ ... [['readonly','nbo','aligned']]*nargs,
+ ... order=kwargs.get('order','K'),
+ ... casting=kwargs.get('casting','safe'),
+ ... buffersize=kwargs.get('buffersize',0))
+ ... while not it.finished:
+ ... it[0] = lamdaexpr(*it[1:])
+ ... it.iternext()
+ ... return it.operands[0]
+
+ >>> a = np.arange(5)
+ >>> b = np.ones(5)
+ >>> luf(lambda i,j:i*i + j/2, a, b)
+ array([ 0.5, 1.5, 4.5, 9.5, 16.5])
+
+ If operand flags ``"writeonly"`` or ``"readwrite"`` are used the
+ operands may be views into the original data with the
+ `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
+ context manager or the `nditer.close` method must be called before
+ using the result. The temporary data will be written back to the
+ original data when the :meth:`~object.__exit__` function is called
+ but not before:
+
+ >>> a = np.arange(6, dtype='i4')[::-2]
+ >>> with np.nditer(a, [],
+ ... [['writeonly', 'updateifcopy']],
+ ... casting='unsafe',
+ ... op_dtypes=[np.dtype('f4')]) as i:
+ ... x = i.operands[0]
+ ... x[:] = [-1, -2, -3]
+ ... # a still unchanged here
+ >>> a, x
+ (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
+
+ It is important to note that once the iterator is exited, dangling
+ references (like `x` in the example) may or may not share data with
+ the original data `a`. If writeback semantics were active, i.e. if
+ `x.base.flags.writebackifcopy` is `True`, then exiting the iterator
+ will sever the connection between `x` and `a`, writing to `x` will
+ no longer write to `a`. If writeback semantics are not active, then
+ `x.data` will still point at some part of `a.data`, and writing to
+ one will affect the other.
+
+ Context management and the `close` method appeared in version 1.15.0.
+
+ """)
+
+# nditer methods
+
+add_newdoc('numpy._core', 'nditer', ('copy',
+ """
+ copy()
+
+ Get a copy of the iterator in its current state.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(10)
+ >>> y = x + 1
+ >>> it = np.nditer([x, y])
+ >>> next(it)
+ (array(0), array(1))
+ >>> it2 = it.copy()
+ >>> next(it2)
+ (array(1), array(2))
+
+ """))
+
+add_newdoc('numpy._core', 'nditer', ('operands',
+ """
+ operands[`Slice`]
+
+ The array(s) to be iterated over. Valid only before the iterator is closed.
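+
+ Examples
+ --------
+ A small sketch; for a simple read-only iterator with no copies, the
+ operand is the original array itself:
+
+ >>> import numpy as np
+ >>> x = np.arange(3)
+ >>> it = np.nditer(x)
+ >>> it.operands[0] is x
+ True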
+ """))
+
+add_newdoc('numpy._core', 'nditer', ('debug_print',
+ """
+ debug_print()
+
+ Print the current state of the `nditer` instance and debug info to stdout.
+
+ """))
+
+add_newdoc('numpy._core', 'nditer', ('enable_external_loop',
+ """
+ enable_external_loop()
+
+ When the "external_loop" was not used during construction, but
+ is desired, this modifies the iterator to behave as if the flag
+ was specified.
+
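+ Examples
+ --------
+ A small sketch; with the flag enabled the iterator hands back whole
+ 1-D chunks instead of scalars (a single chunk here, assuming a
+ C-contiguous input):
+
+ >>> import numpy as np
+ >>> it = np.nditer(np.arange(6).reshape(2, 3))
+ >>> it.enable_external_loop()
+ >>> it[0]
+ array([0, 1, 2, 3, 4, 5])
+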
+ """))
+
+add_newdoc('numpy._core', 'nditer', ('iternext',
+ """
+ iternext()
+
+ Check whether iterations are left, and perform a single internal iteration
+ without returning the result. Used in the C-style do-while pattern.
+ For an example, see `nditer`.
+
+ Returns
+ -------
+ iternext : bool
+ Whether or not there are iterations left.
+
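+ Examples
+ --------
+ A small sketch of the do-while pattern:
+
+ >>> import numpy as np
+ >>> it = np.nditer(np.arange(3))
+ >>> while not it.finished:
+ ... print(it[0])
+ ... _ = it.iternext()
+ 0
+ 1
+ 2
+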
+ """))
+
+add_newdoc('numpy._core', 'nditer', ('remove_axis',
+ """
+ remove_axis(i, /)
+
+ Removes axis `i` from the iterator. Requires that the flag "multi_index"
+ be enabled.
+
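+ Examples
+ --------
+ A small sketch; removing an axis shrinks the iteration space:
+
+ >>> import numpy as np
+ >>> it = np.nditer(np.arange(6).reshape(2, 3), flags=['multi_index'])
+ >>> it.shape, it.itersize
+ ((2, 3), 6)
+ >>> it.remove_axis(0)
+ >>> it.shape, it.itersize
+ ((3,), 3)
+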
+ """))
+
+add_newdoc('numpy._core', 'nditer', ('remove_multi_index',
+ """
+ remove_multi_index()
+
+ When the "multi_index" flag was specified, this removes it, allowing
+ the internal iteration structure to be optimized further.
+
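+ Examples
+ --------
+ A small sketch:
+
+ >>> import numpy as np
+ >>> it = np.nditer(np.arange(4), flags=['multi_index'])
+ >>> it.has_multi_index
+ True
+ >>> it.remove_multi_index()
+ >>> it.has_multi_index
+ False
+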
+ """))
+
+add_newdoc('numpy._core', 'nditer', ('reset',
+ """
+ reset()
+
+ Reset the iterator to its initial state.
+
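+ Examples
+ --------
+ A small sketch:
+
+ >>> import numpy as np
+ >>> it = np.nditer(np.arange(3))
+ >>> next(it), next(it)
+ (array(0), array(1))
+ >>> it.reset()
+ >>> next(it)
+ array(0)
+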
+ """))
+
+add_newdoc('numpy._core', 'nested_iters',
+ """
+ nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \
+ order="K", casting="safe", buffersize=0)
+
+ Create nditers for use in nested loops.
+
+ Create a tuple of `nditer` objects which iterate in nested loops over
+ different axes of the op argument. The first iterator is used in the
+ outermost loop, the last in the innermost loop. Advancing one will change
+ the subsequent iterators to point at its new element.
+
+ Parameters
+ ----------
+ op : ndarray or sequence of array_like
+ The array(s) to iterate over.
+
+ axes : list of list of int
+ Each item is used as an "op_axes" argument to an nditer.
+
+ flags, op_flags, op_dtypes, order, casting, buffersize (optional)
+ See the `nditer` parameters of the same name.
+
+ Returns
+ -------
+ iters : tuple of nditer
+ An nditer for each item in `axes`, outermost first.
+
+ See Also
+ --------
+ nditer
+
+ Examples
+ --------
+
+ Basic usage. Note how y is the "flattened" version of
+ [a[:, 0, :], a[:, 1, :], a[:, 2, :]] since we specified
+ the first iter's axes as [1]:
+
+ >>> import numpy as np
+ >>> a = np.arange(12).reshape(2, 3, 2)
+ >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
+ >>> for x in i:
+ ... print(i.multi_index)
+ ... for y in j:
+ ... print('', j.multi_index, y)
+ (0,)
+ (0, 0) 0
+ (0, 1) 1
+ (1, 0) 6
+ (1, 1) 7
+ (1,)
+ (0, 0) 2
+ (0, 1) 3
+ (1, 0) 8
+ (1, 1) 9
+ (2,)
+ (0, 0) 4
+ (0, 1) 5
+ (1, 0) 10
+ (1, 1) 11
+
+ """)
+
+add_newdoc('numpy._core', 'nditer', ('close',
+ """
+ close()
+
+ Resolve all writeback semantics in writeable operands.
+
+ See Also
+ --------
+
+ :ref:`nditer-context-manager`
+
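+ Examples
+ --------
+ A small sketch; writes made through the iterator are resolved by
+ `close`, after which the operands are no longer accessible:
+
+ >>> import numpy as np
+ >>> a = np.arange(3)
+ >>> it = np.nditer(a, op_flags=[['readwrite']])
+ >>> it[0] = 10
+ >>> it.close()
+ >>> a
+ array([10,  1,  2])
+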
+ """))
+
+
+###############################################################################
+#
+# broadcast
+#
+###############################################################################
+
+add_newdoc('numpy._core', 'broadcast',
+ """
+ Produce an object that mimics broadcasting.
+
+ Parameters
+ ----------
+ in1, in2, ... : array_like
+ Input parameters.
+
+ Returns
+ -------
+ b : broadcast object
+ Broadcast the input parameters against one another, and
+ return an object that encapsulates the result.
+ Amongst others, it has ``shape`` and ``nd`` properties, and
+ may be used as an iterator.
+
+ See Also
+ --------
+ broadcast_arrays
+ broadcast_to
+ broadcast_shapes
+
+ Examples
+ --------
+
+ Manually adding two vectors, using broadcasting:
+
+ >>> import numpy as np
+ >>> x = np.array([[1], [2], [3]])
+ >>> y = np.array([4, 5, 6])
+ >>> b = np.broadcast(x, y)
+
+ >>> out = np.empty(b.shape)
+ >>> out.flat = [u+v for (u,v) in b]
+ >>> out
+ array([[5., 6., 7.],
+ [6., 7., 8.],
+ [7., 8., 9.]])
+
+ Compare against built-in broadcasting:
+
+ >>> x + y
+ array([[5, 6, 7],
+ [6, 7, 8],
+ [7, 8, 9]])
+
+ """)
+
+# attributes
+
+add_newdoc('numpy._core', 'broadcast', ('index',
+ """
+ current index in broadcasted result
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> x = np.array([[1], [2], [3]])
+ >>> y = np.array([4, 5, 6])
+ >>> b = np.broadcast(x, y)
+ >>> b.index
+ 0
+ >>> next(b), next(b), next(b)
+ ((1, 4), (1, 5), (1, 6))
+ >>> b.index
+ 3
+
+ """))
+
+add_newdoc('numpy._core', 'broadcast', ('iters',
+ """
+ tuple of iterators along ``self``'s "components."
+
+ Returns a tuple of `numpy.flatiter` objects, one for each "component"
+ of ``self``.
+
+ See Also
+ --------
+ numpy.flatiter
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> row, col = b.iters
+ >>> next(row), next(col)
+ (1, 4)
+
+ """))
+
+add_newdoc('numpy._core', 'broadcast', ('ndim',
+ """
+ Number of dimensions of broadcasted result. Alias for `nd`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.ndim
+ 2
+
+ """))
+
+add_newdoc('numpy._core', 'broadcast', ('nd',
+ """
+ Number of dimensions of broadcasted result. For code intended for NumPy
+ 1.12.0 and later the more consistent `ndim` is preferred.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.nd
+ 2
+
+ """))
+
+add_newdoc('numpy._core', 'broadcast', ('numiter',
+ """
+ Number of iterators possessed by the broadcasted result.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.numiter
+ 2
+
+ """))
+
+add_newdoc('numpy._core', 'broadcast', ('shape',
+ """
+ Shape of broadcasted result.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.shape
+ (3, 3)
+
+ """))
+
+add_newdoc('numpy._core', 'broadcast', ('size',
+ """
+ Total size of broadcasted result.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.size
+ 9
+
+ """))
+
+add_newdoc('numpy._core', 'broadcast', ('reset',
+ """
+ reset()
+
+ Reset the broadcasted result's iterator(s).
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.index
+ 0
+ >>> next(b), next(b), next(b)
+ ((1, 4), (2, 4), (3, 4))
+ >>> b.index
+ 3
+ >>> b.reset()
+ >>> b.index
+ 0
+
+ """))
+
+###############################################################################
+#
+# numpy functions
+#
+###############################################################################
+
+add_newdoc('numpy._core.multiarray', 'array',
+ """
+ array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
+ like=None)
+
+ Create an array.
+
+ Parameters
+ ----------
+ object : array_like
+ An array, any object exposing the array interface, an object whose
+ ``__array__`` method returns an array, or any (nested) sequence.
+ If object is a scalar, a 0-dimensional array containing object is
+ returned.
+ dtype : data-type, optional
+ The desired data-type for the array. If not given, NumPy will try to use
+ a default ``dtype`` that can represent the values (by applying promotion
+ rules when necessary.)
+ copy : bool, optional
+ If ``True`` (default), then the array data is copied. If ``None``,
+ a copy will only be made if ``__array__`` returns a copy, if obj is
+ a nested sequence, or if a copy is needed to satisfy any of the other
+ requirements (``dtype``, ``order``, etc.). Note that any copy of
+ the data is shallow, i.e., for arrays with object dtype, the new
+ array will point to the same objects. See Examples for `ndarray.copy`.
+ For ``False`` it raises a ``ValueError`` if a copy cannot be avoided.
+ Default: ``True``.
+ order : {'K', 'A', 'C', 'F'}, optional
+ Specify the memory layout of the array. If object is not an array, the
+ newly created array will be in C order (row major) unless 'F' is
+ specified, in which case it will be in Fortran order (column major).
+ If object is an array the following holds.
+
+ ===== ========= ===================================================
+ order no copy copy=True
+ ===== ========= ===================================================
+ 'K' unchanged F & C order preserved, otherwise most similar order
+ 'A' unchanged F order if input is F and not C, otherwise C order
+ 'C' C order C order
+ 'F' F order F order
+ ===== ========= ===================================================
+
+ When ``copy=None`` and a copy is made for other reasons, the result is
+ the same as if ``copy=True``, with some exceptions for 'A', see the
+ Notes section. The default order is 'K'.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned array will be forced to be a base-class array (default).
+ ndmin : int, optional
+ Specifies the minimum number of dimensions that the resulting
+ array should have. Ones will be prepended to the shape as
+ needed to meet this requirement.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ An array object satisfying the specified requirements.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+ copy : Return an array copy of the given object.
+
+
+ Notes
+ -----
+ When order is 'A' and ``object`` is an array in neither 'C' nor 'F' order,
+ and a copy is forced by a change in dtype, then the order of the result is
+ not necessarily 'C' as expected. This is likely a bug.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.array([1, 2, 3])
+ array([1, 2, 3])
+
+ Upcasting:
+
+ >>> np.array([1, 2, 3.0])
+ array([ 1., 2., 3.])
+
+ More than one dimension:
+
+ >>> np.array([[1, 2], [3, 4]])
+ array([[1, 2],
+ [3, 4]])
+
+ Minimum dimensions 2:
+
+ >>> np.array([1, 2, 3], ndmin=2)
+ array([[1, 2, 3]])
+
+ Type provided:
+
+ >>> np.array([1, 2, 3], dtype=complex)
+ array([ 1.+0.j, 2.+0.j, 3.+0.j])
+
+ Data-type consisting of more than one element:
+
+ >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
+ >>> x['a']
+ array([1, 3], dtype=int32)
+
+ Creating an array from sub-classes:
+
+ >>> np.array(np.asmatrix('1 2; 3 4'))
+ array([[1, 2],
+ [3, 4]])
+
+ >>> np.array(np.asmatrix('1 2; 3 4'), subok=True)
+ matrix([[1, 2],
+ [3, 4]])
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'asarray',
+ """
+ asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None)
+
+ Convert the input to an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes lists, lists of tuples, tuples, tuples of tuples, tuples
+ of lists and ndarrays.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Memory layout. 'A' and 'K' depend on the order of input array a.
+ 'C' row-major (C-style),
+ 'F' column-major (Fortran-style) memory representation.
+ 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+ 'K' (keep) preserve input order
+ Defaults to 'K'.
+ device : str, optional
+ The device on which to place the created array. Default: ``None``.
+ For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+ .. versionadded:: 2.0.0
+ copy : bool, optional
+ If ``True``, then the object is copied. If ``None`` then the object is
+ copied only if needed, i.e. if ``__array__`` returns a copy, if obj
+ is a nested sequence, or if a copy is needed to satisfy any of
+ the other requirements (``dtype``, ``order``, etc.).
+ For ``False`` it raises a ``ValueError`` if a copy cannot be avoided.
+ Default: ``None``.
+
+ .. versionadded:: 2.0.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array interpretation of ``a``. No copy is performed if the input
+ is already an ndarray with matching dtype and order. If ``a`` is a
+ subclass of ndarray, a base class ndarray is returned.
+
+ See Also
+ --------
+ asanyarray : Similar function which passes through subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ asarray_chkfinite : Similar function which checks input for NaNs and Infs.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array:
+
+ >>> a = [1, 2]
+ >>> import numpy as np
+ >>> np.asarray(a)
+ array([1, 2])
+
+ Existing arrays are not copied:
+
+ >>> a = np.array([1, 2])
+ >>> np.asarray(a) is a
+ True
+
+ If `dtype` is set, array is copied only if dtype does not match:
+
+ >>> a = np.array([1, 2], dtype=np.float32)
+ >>> np.shares_memory(np.asarray(a, dtype=np.float32), a)
+ True
+ >>> np.shares_memory(np.asarray(a, dtype=np.float64), a)
+ False
+
+ Contrary to `asanyarray`, ndarray subclasses are not passed through:
+
+ >>> issubclass(np.recarray, np.ndarray)
+ True
+ >>> a = np.array([(1., 2), (3., 4)], dtype='f4,i4').view(np.recarray)
+ >>> np.asarray(a) is a
+ False
+ >>> np.asanyarray(a) is a
+ True
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'asanyarray',
+ """
+ asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None)
+
+ Convert the input to an ndarray, but pass ndarray subclasses through.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes scalars, lists, lists of tuples, tuples, tuples of tuples,
+ tuples of lists, and ndarrays.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Memory layout. 'A' and 'K' depend on the order of input array a.
+ 'C' row-major (C-style),
+ 'F' column-major (Fortran-style) memory representation.
+ 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+ 'K' (keep) preserve input order
+ Defaults to 'C'.
+ device : str, optional
+ The device on which to place the created array. Default: ``None``.
+ For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+ .. versionadded:: 2.1.0
+
+ copy : bool, optional
+ If ``True``, then the object is copied. If ``None`` then the object is
+ copied only if needed, i.e. if ``__array__`` returns a copy, if obj
+ is a nested sequence, or if a copy is needed to satisfy any of
+ the other requirements (``dtype``, ``order``, etc.).
+ For ``False`` it raises a ``ValueError`` if a copy cannot be avoided.
+ Default: ``None``.
+
+ .. versionadded:: 2.1.0
+
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray or an ndarray subclass
+ Array interpretation of `a`. If `a` is an ndarray or a subclass
+ of ndarray, it is returned as-is and no copy is performed.
+
+ See Also
+ --------
+ asarray : Similar function which always returns ndarrays.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ asarray_chkfinite : Similar function which checks input for NaNs and
+ Infs.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array:
+
+ >>> a = [1, 2]
+ >>> import numpy as np
+ >>> np.asanyarray(a)
+ array([1, 2])
+
+ Instances of `ndarray` subclasses are passed through as-is:
+
+ >>> a = np.array([(1., 2), (3., 4)], dtype='f4,i4').view(np.recarray)
+ >>> np.asanyarray(a) is a
+ True
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'ascontiguousarray',
+ """
+ ascontiguousarray(a, dtype=None, *, like=None)
+
+ Return a contiguous array (ndim >= 1) in memory (C order).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ dtype : str or dtype object, optional
+ Data-type of returned array.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Contiguous array of same shape and content as `a`, with type `dtype`
+ if specified.
+
+ See Also
+ --------
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ require : Return an ndarray that satisfies requirements.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Examples
+ --------
+ Starting with a Fortran-contiguous array:
+
+ >>> import numpy as np
+ >>> x = np.ones((2, 3), order='F')
+ >>> x.flags['F_CONTIGUOUS']
+ True
+
+ Calling ``ascontiguousarray`` makes a C-contiguous copy:
+
+ >>> y = np.ascontiguousarray(x)
+ >>> y.flags['C_CONTIGUOUS']
+ True
+ >>> np.may_share_memory(x, y)
+ False
+
+ Now, starting with a C-contiguous array:
+
+ >>> x = np.ones((2, 3), order='C')
+ >>> x.flags['C_CONTIGUOUS']
+ True
+
+ Then, calling ``ascontiguousarray`` returns the same object:
+
+ >>> y = np.ascontiguousarray(x)
+ >>> x is y
+ True
+
+ Note: This function returns an array with at least one dimension (1-d)
+ so it will not preserve 0-d arrays.
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'asfortranarray',
+ """
+ asfortranarray(a, dtype=None, *, like=None)
+
+ Return an array (ndim >= 1) laid out in Fortran order in memory.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ dtype : str or dtype object, optional
+ By default, the data-type is inferred from the input data.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ The input `a` in Fortran, or column-major, order.
+
+ See Also
+ --------
+ ascontiguousarray : Convert input to a contiguous (C order) array.
+ asanyarray : Convert input to an ndarray with either row or
+ column-major memory order.
+ require : Return an ndarray that satisfies requirements.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Examples
+ --------
+ Starting with a C-contiguous array:
+
+ >>> import numpy as np
+ >>> x = np.ones((2, 3), order='C')
+ >>> x.flags['C_CONTIGUOUS']
+ True
+
+ Calling ``asfortranarray`` makes a Fortran-contiguous copy:
+
+ >>> y = np.asfortranarray(x)
+ >>> y.flags['F_CONTIGUOUS']
+ True
+ >>> np.may_share_memory(x, y)
+ False
+
+ Now, starting with a Fortran-contiguous array:
+
+ >>> x = np.ones((2, 3), order='F')
+ >>> x.flags['F_CONTIGUOUS']
+ True
+
+ Then, calling ``asfortranarray`` returns the same object:
+
+ >>> y = np.asfortranarray(x)
+ >>> x is y
+ True
+
+ Note: This function returns an array with at least one dimension (1-d)
+ so it will not preserve 0-d arrays.
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'empty',
+ """
+ empty(shape, dtype=float, order='C', *, device=None, like=None)
+
+ Return a new array of given shape and type, without initializing entries.
+
+ Parameters
+ ----------
+ shape : int or tuple of int
+ Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+ Desired output data-type for the array, e.g, `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional, default: 'C'
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+ device : str, optional
+ The device on which to place the created array. Default: ``None``.
+ For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+ .. versionadded:: 2.0.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of uninitialized (arbitrary) data of the given shape, dtype, and
+ order. Object arrays will be initialized to None.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
+ Notes
+ -----
+ Unlike other array creation functions (e.g. `zeros`, `ones`, `full`),
+ `empty` does not initialize the values of the array, and may therefore be
+ marginally faster. However, the values stored in the newly allocated array
+ are arbitrary. For reproducible behavior, be sure to set each element of
+ the array before reading.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.empty([2, 2])
+ array([[ -9.74499359e+001, 6.69583040e-309],
+ [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
+
+ >>> np.empty([2, 2], dtype=int)
+ array([[-1073741821, -1067949133],
+ [ 496041986, 19249760]]) #uninitialized
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'scalar',
+ """
+ scalar(dtype, obj)
+
+ Return a new scalar array of the given type initialized with obj.
+
+ This function is meant mainly for pickle support. `dtype` must be a
+ valid data-type descriptor. If `dtype` corresponds to an object
+ descriptor, then `obj` can be any object, otherwise `obj` must be a
+ string. If `obj` is not given, it will be interpreted as None for object
+ type and as zeros for all other types.
+
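+ Examples
+ --------
+ A small sketch (illustrative; round-trips the raw bytes of a float64
+ scalar through this pickle helper):
+
+ >>> import numpy as np
+ >>> from numpy._core.multiarray import scalar
+ >>> float(scalar(np.dtype('float64'), np.float64(1.5).tobytes()))
+ 1.5
+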
+ """)
+
+add_newdoc('numpy._core.multiarray', 'zeros',
+ """
+ zeros(shape, dtype=float, order='C', *, like=None)
+
+ Return a new array of given shape and type, filled with zeros.
+
+ Parameters
+ ----------
+ shape : int or tuple of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+ The desired data-type for the array, e.g., `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional, default: 'C'
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of zeros with the given shape, dtype, and order.
+
+ See Also
+ --------
+ zeros_like : Return an array of zeros with shape and type of input.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ full : Return a new array of given shape filled with value.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.zeros(5)
+ array([ 0., 0., 0., 0., 0.])
+
+ >>> np.zeros((5,), dtype=int)
+ array([0, 0, 0, 0, 0])
+
+ >>> np.zeros((2, 1))
+ array([[ 0.],
+ [ 0.]])
+
+ >>> s = (2,2)
+ >>> np.zeros(s)
+ array([[ 0., 0.],
+ [ 0., 0.]])
+
+ >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+ array([(0, 0), (0, 0)],
+ dtype=[('x', '<i4'), ('y', '<i4')])
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'fromstring',
+ """
+ fromstring(string, dtype=float, count=-1, *, sep, like=None)
+
+ A new 1-D array initialized from text data in a string.
+
+ Returns
+ -------
+ arr : ndarray
+ The constructed array.
+
+ See Also
+ --------
+ frombuffer, fromfile, fromiter
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.fromstring('1 2', dtype=int, sep=' ')
+ array([1, 2])
+ >>> np.fromstring('1, 2', dtype=int, sep=',')
+ array([1, 2])
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'compare_chararrays',
+ """
+ compare_chararrays(a1, a2, cmp, rstrip)
+
+ Performs element-wise comparison of two string arrays using the
+ comparison operator specified by `cmp`.
+
+ Parameters
+ ----------
+ a1, a2 : array_like
+ Arrays to be compared.
+ cmp : {"<", "<=", "==", ">=", ">", "!="}
+ Type of comparison.
+ rstrip : bool
+ If True, trailing spaces in the strings are removed before the comparison.
+
+ Returns
+ -------
+ out : ndarray
+ The output array of type bool with the same shape as `a1` and `a2`.
+
+ Raises
+ ------
+ ValueError
+ If `cmp` is not valid.
+ TypeError
+ If at least one of `a1` or `a2` is a non-string array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array(["a", "b", "cde"])
+ >>> b = np.array(["a", "a", "dec"])
+ >>> np.char.compare_chararrays(a, b, ">", True)
+ array([False, True, False])
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'fromiter',
+ """
+ fromiter(iter, dtype, count=-1, *, like=None)
+
+ Create a new 1-dimensional array from an iterable object.
+
+ Parameters
+ ----------
+ iter : iterable object
+ An iterable object providing data for the array.
+ dtype : data-type
+ The data-type of the returned array.
+
+ .. versionchanged:: 1.23
+ Object and subarray dtypes are now supported (note that the final
+ result is not 1-D for a subarray dtype).
+
+ count : int, optional
+ The number of items to read from *iterable*. The default is -1,
+ which means all data is read.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ The output array.
+
+ Notes
+ -----
+ Specify `count` to improve performance. It allows ``fromiter`` to
+ pre-allocate the output array, instead of resizing it on demand.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> iterable = (x*x for x in range(5))
+ >>> np.fromiter(iterable, float)
+ array([ 0., 1., 4., 9., 16.])
+
+ A carefully constructed subarray dtype will lead to higher dimensional
+ results:
+
+ >>> iterable = ((x+1, x+2) for x in range(5))
+ >>> np.fromiter(iterable, dtype=np.dtype((int, 2)))
+ array([[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6]])
+
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'fromfile',
+ """
+ fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
+
+ Construct an array from data in a text or binary file.
+
+ A highly efficient way of reading binary data with a known data-type,
+ as well as parsing simply formatted text files. Data written using the
+ `tofile` method can be read using this function.
+
+ Parameters
+ ----------
+ file : file or str or Path
+ Open file object or filename.
+ dtype : data-type
+ Data type of the returned array.
+ For binary files, it is used to determine the size and byte-order
+ of the items in the file.
+ Most builtin numeric types are supported and extension types may be supported.
+ count : int
+ Number of items to read. ``-1`` means all items (i.e., the complete
+ file).
+ sep : str
+ Separator between items if file is a text file.
+ Empty ("") separator means the file should be treated as binary.
+ Spaces (" ") in the separator match zero or more whitespace characters.
+ A separator consisting only of spaces must match at least one
+ whitespace.
+ offset : int
+ The offset (in bytes) from the file's current position. Defaults to 0.
+ Only permitted for binary files.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ See also
+ --------
+ load, save
+ ndarray.tofile
+ loadtxt : More flexible way of loading data from a text file.
+
+ Notes
+ -----
+ Do not rely on the combination of `tofile` and `fromfile` for
+ data storage, as the binary files generated are not platform
+ independent. In particular, no byte-order or data-type information is
+ saved. Data can be stored in the platform independent ``.npy`` format
+ using `save` and `load` instead.
+
+ Examples
+ --------
+ Construct an ndarray:
+
+ >>> import numpy as np
+ >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
+ ... ('temp', float)])
+ >>> x = np.zeros((1,), dtype=dt)
+ >>> x['time']['min'] = 10; x['temp'] = 98.25
+ >>> x
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+ Save the raw data to disk:
+
+ >>> import tempfile
+ >>> fname = tempfile.mkstemp()[1]
+ >>> x.tofile(fname)
+
+ Read the raw data from disk:
+
+ >>> np.fromfile(fname, dtype=dt)
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+ The recommended way to store and load data:
+
+ >>> np.save(fname, x)
+ >>> np.load(fname + '.npy')
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'frombuffer',
+ """
+ frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
+
+ Interpret a buffer as a 1-dimensional array.
+
+ Parameters
+ ----------
+ buffer : buffer_like
+ An object that exposes the buffer interface.
+ dtype : data-type, optional
+ Data-type of the returned array; default: float.
+ count : int, optional
+ Number of items to read. ``-1`` means all data in the buffer.
+ offset : int, optional
+ Start reading the buffer from this offset (in bytes); default: 0.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+
+ See also
+ --------
+ ndarray.tobytes
+ Inverse of this operation, construct Python bytes from the raw data
+ bytes in the array.
+
+ Notes
+ -----
+ If the buffer has data that is not in machine byte-order, this should
+ be specified as part of the data-type, e.g.::
+
+ >>> dt = np.dtype(int)
+ >>> dt = dt.newbyteorder('>')
+ >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
+
+ The data of the resulting array will not be byteswapped, but will be
+ interpreted correctly.
+
+ This function creates a view into the original object. This should be safe
+ in general, but it may make sense to copy the result when the original
+ object is mutable or untrusted.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> s = b'hello world'
+ >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+ array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
+
+ >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
+ array([1, 2], dtype=uint8)
+ >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+ array([1, 2, 3], dtype=uint8)
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'from_dlpack',
+ """
+ from_dlpack(x, /, *, device=None, copy=None)
+
+ Create a NumPy array from an object implementing the ``__dlpack__``
+ protocol. Generally, the returned NumPy array is a view of the input
+ object. See [1]_ and [2]_ for more details.
+
+ Parameters
+ ----------
+ x : object
+ A Python object that implements the ``__dlpack__`` and
+ ``__dlpack_device__`` methods.
+ device : device, optional
+ Device on which to place the created array. Default: ``None``.
+ Must be ``"cpu"`` if passed, which may allow importing an array
+ that is not already CPU available.
+ copy : bool, optional
+ Boolean indicating whether or not to copy the input. If ``True``,
+ the copy will be made. If ``False``, the function will never copy,
+ and will raise ``BufferError`` in case a copy is deemed necessary.
+ Passing it requests a copy from the exporter who may or may not
+ implement the capability.
+ If ``None``, the function will reuse the existing memory buffer if
+ possible and copy otherwise. Default: ``None``.
+
+
+ Returns
+ -------
+ out : ndarray
+
+ References
+ ----------
+ .. [1] Array API documentation,
+ https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack
+
+ .. [2] Python specification for DLPack,
+ https://dmlc.github.io/dlpack/latest/python_spec.html
+
+ Examples
+ --------
+ >>> import torch # doctest: +SKIP
+ >>> x = torch.arange(10) # doctest: +SKIP
+ >>> # create a view of the torch tensor "x" in NumPy
+ >>> y = np.from_dlpack(x) # doctest: +SKIP
+ """)
+
+add_newdoc('numpy._core.multiarray', 'correlate',
+ """cross_correlate(a,v, mode=0)""")
+
+add_newdoc('numpy._core.multiarray', 'arange',
+ """
+ arange([start,] stop[, step,], dtype=None, *, device=None, like=None)
+
+ Return evenly spaced values within a given interval.
+
+ ``arange`` can be called with a varying number of positional arguments:
+
+ * ``arange(stop)``: Values are generated within the half-open interval
+ ``[0, stop)`` (in other words, the interval including `start` but
+ excluding `stop`).
+ * ``arange(start, stop)``: Values are generated within the half-open
+ interval ``[start, stop)``.
+ * ``arange(start, stop, step)`` Values are generated within the half-open
+ interval ``[start, stop)``, with spacing between values given by
+ ``step``.
+
+ For integer arguments the function is roughly equivalent to the Python
+ built-in :py:class:`range`, but returns an ndarray rather than a ``range``
+ instance.
+
+ When using a non-integer step, such as 0.1, it is often better to use
+ `numpy.linspace`.
+
+ See the Warning sections below for more information.
+
+ Parameters
+ ----------
+ start : integer or real, optional
+ Start of interval. The interval includes this value. The default
+ start value is 0.
+ stop : integer or real
+ End of interval. The interval does not include this value, except
+ in some cases where `step` is not an integer and floating point
+ round-off affects the length of `out`.
+ step : integer or real, optional
+ Spacing between values. For any output `out`, this is the distance
+ between two adjacent values, ``out[i+1] - out[i]``. The default
+ step size is 1. If `step` is specified as a position argument,
+ `start` must also be given.
+ dtype : dtype, optional
+ The type of the output array. If `dtype` is not given, infer the data
+ type from the other input arguments.
+ device : str, optional
+ The device on which to place the created array. Default: ``None``.
+ For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+ .. versionadded:: 2.0.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ arange : ndarray
+ Array of evenly spaced values.
+
+ For floating point arguments, the length of the result is
+ ``ceil((stop - start)/step)``. Because of floating point overflow,
+ this rule may result in the last element of `out` being greater
+ than `stop`.
+
+ Warnings
+ --------
+ The length of the output might not be numerically stable.
+
+ Another stability issue is due to the internal implementation of
+ `numpy.arange`.
+ The actual step value used to populate the array is
+ ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss
+ can occur here, due to casting or due to using floating points when
+ `start` is much larger than `step`. This can lead to unexpected
+ behaviour. For example::
+
+ >>> np.arange(0, 5, 0.5, dtype=int)
+ array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ >>> np.arange(-3, 3, 0.5, dtype=int)
+ array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
+
+ In such cases, the use of `numpy.linspace` should be preferred.
+
+ The built-in :py:class:`range` generates :std:doc:`Python built-in integers
+ that have arbitrary size <python:c-api/long>`, while `numpy.arange`
+ produces `numpy.int32` or `numpy.int64` numbers. This may result in
+ incorrect results for large integer values::
+
+ >>> power = 40
+ >>> modulo = 10000
+ >>> x1 = [(n ** power) % modulo for n in range(8)]
+ >>> x2 = [(n ** power) % modulo for n in np.arange(8)]
+ >>> print(x1)
+ [0, 1, 7776, 8801, 6176, 625, 6576, 4001] # correct
+ >>> print(x2)
+ [0, 1, 7776, 7185, 0, 5969, 4816, 3361] # incorrect
+
+ See Also
+ --------
+ numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
+ numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
+ numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+ :ref:`how-to-partition`
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.arange(3)
+ array([0, 1, 2])
+ >>> np.arange(3.0)
+ array([ 0., 1., 2.])
+ >>> np.arange(3,7)
+ array([3, 4, 5, 6])
+ >>> np.arange(3,7,2)
+ array([3, 5])
+
+ """)
+
+add_newdoc('numpy._core.multiarray', '_get_ndarray_c_version',
+ """_get_ndarray_c_version()
+
+ Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
+
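+ Examples
+ --------
+ A small sketch (the exact value depends on the NumPy build, so only
+ the return type is checked here):
+
+ >>> from numpy._core.multiarray import _get_ndarray_c_version
+ >>> isinstance(_get_ndarray_c_version(), int)
+ True
+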
+ """)
+
+add_newdoc('numpy._core.multiarray', '_reconstruct',
+ """_reconstruct(subtype, shape, dtype)
+
+ Construct an empty array. Used by Pickles.
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'promote_types',
+ """
+ promote_types(type1, type2)
+
+ Returns the data type with the smallest size and smallest scalar
+ kind to which both ``type1`` and ``type2`` may be safely cast.
+ The returned data type is always considered "canonical", this mainly
+ means that the promoted dtype will always be in native byte order.
+
+ This function is symmetric, but rarely associative.
+
+ Parameters
+ ----------
+ type1 : dtype or dtype specifier
+ First data type.
+ type2 : dtype or dtype specifier
+ Second data type.
+
+ Returns
+ -------
+ out : dtype
+ The promoted data type.
+
+ Notes
+ -----
+ Please see `numpy.result_type` for additional information about promotion.
+
+ Starting in NumPy 1.9, the promote_types function returns a valid string
+ length when given an integer or float dtype as one argument and a string
+ dtype as another argument. Previously it always returned the input string
+ dtype, even if it wasn't long enough to store the max integer/float value
+ converted to a string.
+
+ .. versionchanged:: 1.23.0
+
+ NumPy now supports promotion for more structured dtypes. It will now
+ remove unnecessary padding from a structure dtype and promote included
+ fields individually.
+
+ See Also
+ --------
+ result_type, dtype, can_cast
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.promote_types('f4', 'f8')
+ dtype('float64')
+
+ >>> np.promote_types('i8', 'f4')
+ dtype('float64')
+
+ >>> np.promote_types('>i8', '<i8')
+ dtype('int64')
+
+ >>> np.promote_types('i4', 'S8')
+ dtype('S11')
+
+ An example of a non-associative case:
+
+ >>> p = np.promote_types
+ >>> p('S', p('i1', 'u1'))
+ dtype('S6')
+ >>> p(p('S', 'i1'), 'u1')
+ dtype('S4')
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'c_einsum',
+ """
+ c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
+ casting='safe')
+
+ *This documentation shadows that of the native python implementation of the `einsum` function,
+ except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
+
+ Evaluates the Einstein summation convention on the operands.
+
+ Using the Einstein summation convention, many common multi-dimensional,
+ linear algebraic array operations can be represented in a simple fashion.
+ In *implicit* mode `einsum` computes these values.
+
+ In *explicit* mode, `einsum` provides further flexibility to compute
+ other array operations that might not be considered classical Einstein
+ summation operations, by disabling, or forcing summation over specified
+ subscript labels.
+
+ See the notes and examples for clarification.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation as comma separated list of
+ subscript labels. An implicit (classical Einstein summation)
+ calculation is performed unless the explicit indicator '->' is
+ included as well as subscript labels of the precise output form.
+ operands : list of array_like
+ These are the arrays for the operation.
+ out : ndarray, optional
+ If provided, the calculation is done into this array.
+ dtype : {data-type, None}, optional
+ If provided, forces the calculation to use the data type specified.
+ Note that you may have to also give a more liberal `casting`
+ parameter to allow the conversions. Default is None.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the output. 'C' means it should
+ be C contiguous. 'F' means it should be Fortran contiguous,
+ 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+ 'K' means it should be as close to the layout of the inputs as
+ is possible, including arbitrarily permuted axes.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Setting this to
+ 'unsafe' is not recommended, as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Default is 'safe'.
+ optimize : {False, True, 'greedy', 'optimal'}, optional
+ Controls if intermediate optimization should occur. No optimization
+ will occur if False and True will default to the 'greedy' algorithm.
+ Also accepts an explicit contraction list from the ``np.einsum_path``
+ function. See ``np.einsum_path`` for more details. Defaults to False.
+
+ Returns
+ -------
+ output : ndarray
+ The calculation based on the Einstein summation convention.
+
+ See Also
+ --------
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+
+ Notes
+ -----
+ The Einstein summation convention can be used to compute
+ many multi-dimensional, linear algebraic array operations. `einsum`
+ provides a succinct way of representing these.
+
+ A non-exhaustive list of these operations,
+ which can be computed by `einsum`, is shown below along with examples:
+
+ * Trace of an array, :py:func:`numpy.trace`.
+ * Return a diagonal, :py:func:`numpy.diag`.
+ * Array axis summations, :py:func:`numpy.sum`.
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
+ * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+ * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+ * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+ * Tensor contractions, :py:func:`numpy.tensordot`.
+ * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+ The subscripts string is a comma-separated list of subscript labels,
+ where each label refers to a dimension of the corresponding operand.
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+ appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+ view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+ describes traditional matrix multiplication and is equivalent to
+ :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+ operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+ to :py:func:`np.trace(a) <numpy.trace>`.
+
+ In *implicit mode*, the chosen subscripts are important
+ since the axes of the output are reordered alphabetically. This
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+ multiplication since subscript 'h' precedes subscript 'i'.
+
+ In *explicit mode* the output can be directly controlled by
+ specifying output subscript labels. This requires the
+ identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since
+ summing can be disabled or forced when required. The call
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) <numpy.sum>`
+ if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)``
+ is like :py:func:`np.diag(a) <numpy.diag>` if ``a`` is a square 2-D array.
+ The difference is that `einsum` does not allow broadcasting by default.
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+ order of the output subscript labels and therefore returns matrix
+ multiplication, unlike the example above in implicit mode.
+
+ To enable and control broadcasting, use an ellipsis. Default
+ NumPy-style broadcasting is done by adding an ellipsis
+ to the left of each term, like ``np.einsum('...ii->...i', a)``.
+ ``np.einsum('...i->...', a)`` is like
+ :py:func:`np.sum(a, axis=-1) <numpy.sum>` for array ``a`` of any shape.
+ To take the trace along the first and last axes,
+ you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+ product with the left-most indices instead of rightmost, one can do
+ ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+ When there is only one operand, no axes are summed, and no output
+ parameter is provided, a view into the operand is returned instead
+ of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+ produces a view (changed in version 1.10.0).
+
+ `einsum` also provides an alternative way to provide the subscripts
+ and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+ If the output shape is not provided in this format `einsum` will be
+ calculated in implicit mode, otherwise it will be performed explicitly.
+ The examples below have corresponding `einsum` calls with the two
+ parameter methods.
+
+ Views returned from einsum are now writeable whenever the input array
+ is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+ of a 2D array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(25).reshape(5,5)
+ >>> b = np.arange(5)
+ >>> c = np.arange(6).reshape(2,3)
+
+ Trace of a matrix:
+
+ >>> np.einsum('ii', a)
+ 60
+ >>> np.einsum(a, [0,0])
+ 60
+ >>> np.trace(a)
+ 60
+
+ Extract the diagonal (requires explicit form):
+
+ >>> np.einsum('ii->i', a)
+ array([ 0, 6, 12, 18, 24])
+ >>> np.einsum(a, [0,0], [0])
+ array([ 0, 6, 12, 18, 24])
+ >>> np.diag(a)
+ array([ 0, 6, 12, 18, 24])
+
+ Sum over an axis (requires explicit form):
+
+ >>> np.einsum('ij->i', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [0,1], [0])
+ array([ 10, 35, 60, 85, 110])
+ >>> np.sum(a, axis=1)
+ array([ 10, 35, 60, 85, 110])
+
+ For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+ >>> np.einsum('...j->...', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+ array([ 10, 35, 60, 85, 110])
+
+ Compute a matrix transpose, or reorder any number of axes:
+
+ >>> np.einsum('ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum('ij->ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum(c, [1,0])
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.transpose(c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+
+ Vector inner products:
+
+ >>> np.einsum('i,i', b, b)
+ 30
+ >>> np.einsum(b, [0], b, [0])
+ 30
+ >>> np.inner(b,b)
+ 30
+
+ Matrix vector multiplication:
+
+ >>> np.einsum('ij,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum(a, [0,1], b, [1])
+ array([ 30, 80, 130, 180, 230])
+ >>> np.dot(a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum('...j,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+
+ Broadcasting and scalar multiplication:
+
+ >>> np.einsum('..., ...', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(',ij', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.multiply(3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+
+ Vector outer product:
+
+ >>> np.einsum('i,j', np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.einsum(np.arange(2)+1, [0], b, [1])
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.outer(np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+
+ Tensor contraction:
+
+ >>> a = np.arange(60.).reshape(3,4,5)
+ >>> b = np.arange(24.).reshape(4,3,2)
+ >>> np.einsum('ijk,jil->kl', a, b)
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+
+ Writeable returned arrays (since version 1.10.0):
+
+ >>> a = np.zeros((3, 3))
+ >>> np.einsum('ii->i', a)[:] = 1
+ >>> a
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+
+ Example of ellipsis use:
+
+ >>> a = np.arange(6).reshape((3,2))
+ >>> b = np.arange(12).reshape((4,3))
+ >>> np.einsum('ki,jk->ij', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('ki,...k->i...', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('k...,jk', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+
+ """)
+
+
+##############################################################################
+#
+# Documentation for ndarray attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ndarray object
+#
+##############################################################################
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray',
+ """
+ ndarray(shape, dtype=float, buffer=None, offset=0,
+ strides=None, order=None)
+
+ An array object represents a multidimensional, homogeneous array
+ of fixed-size items. An associated data-type object describes the
+ format of each element in the array (its byte-order, how many bytes it
+ occupies in memory, whether it is an integer, a floating point number,
+ or something else, etc.)
+
+ Arrays should be constructed using `array`, `zeros` or `empty` (refer
+ to the See Also section below). The parameters given here refer to
+ a low-level method (`ndarray(...)`) for instantiating an array.
+
+ For more information, refer to the `numpy` module and examine the
+ methods and attributes of an array.
+
+ Parameters
+ ----------
+ (for the __new__ method; see Notes below)
+
+ shape : tuple of ints
+ Shape of created array.
+ dtype : data-type, optional
+ Any object that can be interpreted as a numpy data type.
+ buffer : object exposing buffer interface, optional
+ Used to fill the array with data.
+ offset : int, optional
+ Offset of array data in buffer.
+ strides : tuple of ints, optional
+ Strides of data in memory.
+ order : {'C', 'F'}, optional
+ Row-major (C-style) or column-major (Fortran-style) order.
+
+ Attributes
+ ----------
+ T : ndarray
+ Transpose of the array.
+ data : buffer
+ The array's elements, in memory.
+ dtype : dtype object
+ Describes the format of the elements in the array.
+ flags : dict
+ Dictionary containing information related to memory use, e.g.,
+ 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
+ flat : numpy.flatiter object
+ Flattened version of the array as an iterator. The iterator
+ allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
+ assignment examples; TODO).
+ imag : ndarray
+ Imaginary part of the array.
+ real : ndarray
+ Real part of the array.
+ size : int
+ Number of elements in the array.
+ itemsize : int
+ The memory use of each array element in bytes.
+ nbytes : int
+ The total number of bytes required to store the array data,
+ i.e., ``itemsize * size``.
+ ndim : int
+ The array's number of dimensions.
+ shape : tuple of ints
+ Shape of the array.
+ strides : tuple of ints
+ The step-size required to move from one element to the next in
+ memory. For example, a contiguous ``(3, 4)`` array of type
+ ``int16`` in C-order has strides ``(8, 2)``. This implies that
+ to move from element to element in memory requires jumps of 2 bytes.
+ To move from row-to-row, one needs to jump 8 bytes at a time
+ (``2 * 4``).
+ ctypes : ctypes object
+ Class containing properties of the array needed for interaction
+ with ctypes.
+ base : ndarray
+ If the array is a view into another array, that array is its `base`
+ (unless that array is also a view). The `base` array is where the
+ array data is actually stored.
+
+ See Also
+ --------
+ array : Construct an array.
+ zeros : Create an array, each element of which is zero.
+ empty : Create an array, but leave its allocated memory unchanged (i.e.,
+ it contains "garbage").
+ dtype : Create a data-type.
+ numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
+ w.r.t. its `dtype.type <numpy.dtype.type>`.
+
+ Notes
+ -----
+ There are two modes of creating an array using ``__new__``:
+
+ 1. If `buffer` is None, then only `shape`, `dtype`, and `order`
+ are used.
+ 2. If `buffer` is an object exposing the buffer interface, then
+ all keywords are interpreted.
+
+ No ``__init__`` method is needed because the array is fully initialized
+ after the ``__new__`` method.
+
+ Examples
+ --------
+ These examples illustrate the low-level `ndarray` constructor. Refer
+ to the `See Also` section above for easier ways of constructing an
+ ndarray.
+
+ First mode, `buffer` is None:
+
+ >>> import numpy as np
+ >>> np.ndarray(shape=(2,2), dtype=float, order='F')
+ array([[0.0e+000, 0.0e+000], # random
+ [ nan, 2.5e-323]])
+
+ Second mode:
+
+ >>> np.ndarray((2,), buffer=np.array([1,2,3]),
+ ... offset=np.int_().itemsize,
+ ... dtype=int) # offset = 1*itemsize, i.e. skip first element
+ array([2, 3])
+
+ """)
+
+
+##############################################################################
+#
+# ndarray attributes
+#
+##############################################################################
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_interface__',
+ """Array protocol: Python side."""))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_priority__',
+ """Array priority."""))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__',
+ """Array protocol: C-struct side."""))
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__',
+ """
+ a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None)
+
+ DLPack Protocol: Part of the Array API.
+
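+ Examples
+ --------
+ A minimal round-trip sketch; ``np.from_dlpack`` consumes this
+ protocol, so the method is rarely called directly:
+
+ >>> import numpy as np
+ >>> a = np.arange(4.)
+ >>> np.from_dlpack(a)
+ array([0., 1., 2., 3.])
+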
+ """))
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__',
+ """
+ a.__dlpack_device__()
+
+ DLPack Protocol: Part of the Array API.
+
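+ Examples
+ --------
+ A small sketch; for a CPU-resident array the result is the DLPack
+ device tuple ``(kDLCPU, 0)``:
+
+ >>> import numpy as np
+ >>> np.arange(3).__dlpack_device__()
+ (1, 0)
+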
+ """))
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('base',
+ """
+ Base object if memory is from some other object.
+
+ Examples
+ --------
+ The base of an array that owns its memory is None:
+
+ >>> import numpy as np
+ >>> x = np.array([1,2,3,4])
+ >>> x.base is None
+ True
+
+ Slicing creates a view, whose memory is shared with x:
+
+ >>> y = x[2:]
+ >>> y.base is x
+ True
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('ctypes',
+ """
+ An object to simplify the interaction of the array with the ctypes
+ module.
+
+ This attribute creates an object that makes it easier to use arrays
+ when calling shared libraries with the ctypes module. The returned
+ object has, among others, data, shape, and strides attributes (see
+ Notes below) which themselves return ctypes objects that can be used
+ as arguments to a shared library.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ c : Python object
+ Possessing attributes data, shape, strides, etc.
+
+ See Also
+ --------
+ numpy.ctypeslib
+
+ Notes
+ -----
+ Below are the public attributes of this object which were documented
+ in "Guide to NumPy" (we have omitted undocumented public attributes,
+ as well as documented private attributes):
+
+ .. autoattribute:: numpy._core._internal._ctypes.data
+ :noindex:
+
+ .. autoattribute:: numpy._core._internal._ctypes.shape
+ :noindex:
+
+ .. autoattribute:: numpy._core._internal._ctypes.strides
+ :noindex:
+
+ .. automethod:: numpy._core._internal._ctypes.data_as
+ :noindex:
+
+ .. automethod:: numpy._core._internal._ctypes.shape_as
+ :noindex:
+
+ .. automethod:: numpy._core._internal._ctypes.strides_as
+ :noindex:
+
+ If the ctypes module is not available, then the ctypes attribute
+ of array objects still returns something useful, but ctypes objects
+ are not returned and errors may be raised instead. In particular,
+ the object will still have the ``as_parameter`` attribute which will
+ return an integer equal to the data attribute.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import ctypes
+ >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
+ >>> x
+ array([[0, 1],
+ [2, 3]], dtype=int32)
+ >>> x.ctypes.data
+ 31962608 # may vary
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
+ <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
+ c_uint(0)
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
+ c_ulong(4294967296)
+ >>> x.ctypes.shape
+ <numpy._core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary
+ >>> x.ctypes.strides
+ <numpy._core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('data',
+ """Python buffer object pointing to the start of the array's data."""))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('dtype',
+ """
+ Data-type of the array's elements.
+
+ .. warning::
+
+ Setting ``arr.dtype`` is discouraged and may be deprecated in the
+ future. Setting will replace the ``dtype`` without modifying the
+ memory (see also `ndarray.view` and `ndarray.astype`).
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ d : numpy dtype object
+
+ See Also
+ --------
+ ndarray.astype : Cast the values contained in the array to a new data-type.
+ ndarray.view : Create a view of the same data but a different data-type.
+ numpy.dtype
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(4).reshape((2, 2))
+ >>> x
+ array([[0, 1],
+ [2, 3]])
+ >>> x.dtype
+ dtype('int64') # may vary (OS, bitness)
+ >>> isinstance(x.dtype, np.dtype)
+ True
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('imag',
+ """
+ The imaginary part of the array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.sqrt([1+0j, 0+1j])
+ >>> x.imag
+ array([ 0. , 0.70710678])
+ >>> x.imag.dtype
+ dtype('float64')
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('itemsize',
+ """
+ Length of one array element in bytes.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1,2,3], dtype=np.float64)
+ >>> x.itemsize
+ 8
+ >>> x = np.array([1,2,3], dtype=np.complex128)
+ >>> x.itemsize
+ 16
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('flags',
+ """
+ Information about the memory layout of the array.
+
+ Attributes
+ ----------
+ C_CONTIGUOUS (C)
+ The data is in a single, C-style contiguous segment.
+ F_CONTIGUOUS (F)
+ The data is in a single, Fortran-style contiguous segment.
+ OWNDATA (O)
+ The array owns the memory it uses or borrows it from another object.
+ WRITEABLE (W)
+ The data area can be written to. Setting this to False locks
+ the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
+ from its base array at creation time, but a view of a writeable
+ array may be subsequently locked while the base array remains writeable.
+ (The opposite is not true, in that a view of a locked array may not
+ be made writeable. However, currently, locking a base object does not
+ lock any views that already reference it, so under that circumstance it
+ is possible to alter the contents of a locked array via a previously
+ created writeable view onto it.) Attempting to change a non-writeable
+ array raises a RuntimeError exception.
+ ALIGNED (A)
+ The data and all elements are aligned appropriately for the hardware.
+ WRITEBACKIFCOPY (X)
+ This array is a copy of some other array. The C-API function
+ PyArray_ResolveWritebackIfCopy must be called before deallocating
+ this array; the base array is then updated with its contents.
+ FNC
+ F_CONTIGUOUS and not C_CONTIGUOUS.
+ FORC
+ F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
+ BEHAVED (B)
+ ALIGNED and WRITEABLE.
+ CARRAY (CA)
+ BEHAVED and C_CONTIGUOUS.
+ FARRAY (FA)
+ BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
+
+ Notes
+ -----
+ The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
+ or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
+ names are only supported in dictionary access.
+
+ Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be
+ changed by the user, via direct assignment to the attribute or dictionary
+ entry, or by calling `ndarray.setflags`.
+
+ The array flags cannot be set arbitrarily:
+
+ - WRITEBACKIFCOPY can only be set ``False``.
+ - ALIGNED can only be set ``True`` if the data is truly aligned.
+ - WRITEABLE can only be set ``True`` if the array owns its own memory
+ or the ultimate owner of the memory exposes a writeable buffer
+ interface or is a string.
+
+ Arrays can be both C-style and Fortran-style contiguous simultaneously.
+ This is clear for 1-dimensional arrays, but can also be true for higher
+ dimensional arrays.
+
+ Even for contiguous arrays a stride for a given dimension
+ ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+ or the array has no elements.
+ It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+ for C-style contiguous arrays, nor that ``self.strides[0] == self.itemsize``
+ for Fortran-style contiguous arrays.
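+
+ Examples
+ --------
+ Dictionary-style and attribute-style access, and locking a freshly
+ created array against writes:
+
+ >>> import numpy as np
+ >>> a = np.ones((2, 3))
+ >>> a.flags['C_CONTIGUOUS'], a.flags.writeable
+ (True, True)
+ >>> a.flags.writeable = False
+ >>> a[0, 0] = 2
+ Traceback (most recent call last):
+     ...
+ ValueError: assignment destination is read-only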
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('flat',
+ """
+ A 1-D iterator over the array.
+
+ This is a `numpy.flatiter` instance, which acts similarly to, but is not
+ a subclass of, Python's built-in iterator object.
+
+ See Also
+ --------
+ flatten : Return a copy of the array collapsed into one dimension.
+
+ flatiter
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(1, 7).reshape(2, 3)
+ >>> x
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> x.flat[3]
+ 4
+ >>> x.T
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> x.T.flat[3]
+ 5
+ >>> type(x.flat)
+ <class 'numpy.flatiter'>
+
+ An assignment example:
+
+ >>> x.flat = 3; x
+ array([[3, 3, 3],
+ [3, 3, 3]])
+ >>> x.flat[[1,4]] = 1; x
+ array([[3, 1, 3],
+ [3, 1, 3]])
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('nbytes',
+ """
+ Total bytes consumed by the elements of the array.
+
+ Notes
+ -----
+ Does not include memory consumed by non-element attributes of the
+ array object.
+
+ See Also
+ --------
+ sys.getsizeof
+ Memory consumed by the object itself, not including the parent array
+ when the array is a view. This does include memory consumed by
+ non-element attributes.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.zeros((3,5,2), dtype=np.complex128)
+ >>> x.nbytes
+ 480
+ >>> np.prod(x.shape) * x.itemsize
+ 480
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('ndim',
+ """
+ Number of array dimensions.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> x.ndim
+ 1
+ >>> y = np.zeros((2, 3, 4))
+ >>> y.ndim
+ 3
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('real',
+ """
+ The real part of the array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.sqrt([1+0j, 0+1j])
+ >>> x.real
+ array([ 1. , 0.70710678])
+ >>> x.real.dtype
+ dtype('float64')
+
+ See Also
+ --------
+ numpy.real : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('shape',
+ """
+ Tuple of array dimensions.
+
+ The shape property is usually used to get the current shape of an array,
+ but may also be used to reshape the array in-place by assigning a tuple of
+ array dimensions to it. As with `numpy.reshape`, one of the new shape
+ dimensions can be -1, in which case its value is inferred from the size of
+ the array and the remaining dimensions. Reshaping an array in-place will
+ fail if a copy is required.
+
+ .. warning::
+
+ Setting ``arr.shape`` is discouraged and may be deprecated in the
+ future. Using `ndarray.reshape` is the preferred approach.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3, 4])
+ >>> x.shape
+ (4,)
+ >>> y = np.zeros((2, 3, 4))
+ >>> y.shape
+ (2, 3, 4)
+ >>> y.shape = (3, 8)
+ >>> y
+ array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0.]])
+ >>> y.shape = (3, 6)
+ Traceback (most recent call last):
+ File "", line 1, in
+ ValueError: cannot reshape array of size 24 into shape (3,6)
+ >>> np.zeros((4,2))[::2].shape = (-1,)
+ Traceback (most recent call last):
+ File "", line 1, in
+ AttributeError: Incompatible shape for in-place modification. Use
+ `.reshape()` to make a copy with the desired shape.
+
+ See Also
+ --------
+ numpy.shape : Equivalent getter function.
+ numpy.reshape : Function similar to setting ``shape``.
+ ndarray.reshape : Method similar to setting ``shape``.
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('size',
+ """
+ Number of elements in the array.
+
+ Equal to ``np.prod(a.shape)``, i.e., the product of the array's
+ dimensions.
+
+ Notes
+ -----
+ `a.size` returns a standard arbitrary precision Python integer. This
+ may not be the case with other methods of obtaining the same value
+ (like the suggested ``np.prod(a.shape)``, which returns an instance
+ of ``np.int_``), and may be relevant if the value is used further in
+ calculations that may overflow a fixed size integer type.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
+ >>> x.size
+ 30
+ >>> np.prod(x.shape)
+ 30
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('strides',
+ """
+ Tuple of bytes to step in each dimension when traversing an array.
+
+ The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
+ is::
+
+ offset = sum(np.array(i) * a.strides)
+
+ A more detailed explanation of strides can be found in
+ :ref:`arrays.ndarray`.
+
+ .. warning::
+
+ Setting ``arr.strides`` is discouraged and may be deprecated in the
+ future. `numpy.lib.stride_tricks.as_strided` should be preferred
+ to create a new view of the same data in a safer way.
+
+ Notes
+ -----
+ Imagine an array of 32-bit integers (each 4 bytes)::
+
+ x = np.array([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]], dtype=np.int32)
+
+ This array is stored in memory as 40 bytes, one after the other
+ (known as a contiguous block of memory). The strides of an array tell
+ us how many bytes we have to skip in memory to move to the next position
+ along a certain axis. For example, we have to skip 4 bytes (1 value) to
+ move to the next column, but 20 bytes (5 values) to get to the same
+ position in the next row. As such, the strides for the array `x` will be
+ ``(20, 4)``.
+
+ See Also
+ --------
+ numpy.lib.stride_tricks.as_strided
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> y = np.reshape(np.arange(2 * 3 * 4, dtype=np.int32), (2, 3, 4))
+ >>> y
+ array([[[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]],
+ [[12, 13, 14, 15],
+ [16, 17, 18, 19],
+ [20, 21, 22, 23]]], dtype=np.int32)
+ >>> y.strides
+ (48, 16, 4)
+ >>> y[1, 1, 1]
+ np.int32(17)
+ >>> offset = sum(y.strides * np.array((1, 1, 1)))
+ >>> offset // y.itemsize
+ np.int64(17)
+
+ >>> x = np.reshape(np.arange(5*6*7*8, dtype=np.int32), (5, 6, 7, 8))
+ >>> x = x.transpose(2, 3, 1, 0)
+ >>> x.strides
+ (32, 4, 224, 1344)
+ >>> i = np.array([3, 5, 2, 2], dtype=np.int32)
+ >>> offset = sum(i * x.strides)
+ >>> x[3, 5, 2, 2]
+ np.int32(813)
+ >>> offset // x.itemsize
+ np.int64(813)
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('T',
+ """
+ View of the transposed array.
+
+ Same as ``self.transpose()``.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> a.T
+ array([[1, 3],
+ [2, 4]])
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> a
+ array([1, 2, 3, 4])
+ >>> a.T
+ array([1, 2, 3, 4])
+
+ See Also
+ --------
+ transpose
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('mT',
+ """
+ View of the matrix transposed array.
+
+ The matrix transpose is the transpose of the last two dimensions, even
+ if the array is of higher dimension.
+
+ .. versionadded:: 2.0
+
+ Raises
+ ------
+ ValueError
+ If the array is of dimension less than 2.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> a.mT
+ array([[1, 3],
+ [2, 4]])
+
+ >>> a = np.arange(8).reshape((2, 2, 2))
+ >>> a
+ array([[[0, 1],
+ [2, 3]],
+
+ [[4, 5],
+ [6, 7]]])
+ >>> a.mT
+ array([[[0, 2],
+ [1, 3]],
+
+ [[4, 6],
+ [5, 7]]])
+
+ """))
+##############################################################################
+#
+# ndarray methods
+#
+##############################################################################
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__',
+ """
+ a.__array__([dtype], *, copy=None)
+
+ If ``dtype`` is not given or matches the array's data type, a new
+ reference to self is returned. If ``dtype`` differs from the current
+ data type of the array, a new array of the provided data type is
+ returned. For the ``copy`` parameter, a new reference to self is
+ returned if ``copy=False`` or ``copy=None`` and copying is not enforced
+ by the ``dtype`` parameter. The method returns a new array for
+ ``copy=True``, regardless of the ``dtype`` parameter.
+
+ A more detailed explanation of the ``__array__`` interface
+ can be found in :ref:`dunder_array.interface`.
+
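+ Examples
+ --------
+ A small sketch of the reference-vs-copy behaviour described above:
+
+ >>> import numpy as np
+ >>> a = np.arange(3)
+ >>> a.__array__() is a           # no dtype given: new reference to self
+ True
+ >>> a.__array__(copy=True) is a  # forced copy: a distinct array
+ False
+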
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__',
+ """
+ a.__array_finalize__(obj, /)
+
+ Present so subclasses can call super. Does nothing.
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__',
+ """
+ a.__array_wrap__(array[, context], /)
+
+ Returns a view of `array` with the same type as self.
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__',
+ """
+ a.__copy__()
+
+ Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+
+ Equivalent to ``a.copy(order='K')``.
+
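+ Examples
+ --------
+ >>> import copy
+ >>> import numpy as np
+ >>> a = np.array([1, 2, 3])
+ >>> b = copy.copy(a)  # dispatches to a.__copy__()
+ >>> b is a, np.array_equal(a, b)
+ (False, True)
+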
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__',
+ """
+ a.__class_getitem__(item, /)
+
+ Return a parametrized wrapper around the `~numpy.ndarray` type.
+
+ .. versionadded:: 1.22
+
+ Returns
+ -------
+ alias : types.GenericAlias
+ A parametrized `~numpy.ndarray` type.
+
+ Examples
+ --------
+ >>> from typing import Any
+ >>> import numpy as np
+
+ >>> np.ndarray[Any, np.dtype[np.uint8]]
+ numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]]
+
+ See Also
+ --------
+ :pep:`585` : Type hinting generics in standard collections.
+ numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
+ w.r.t. its `dtype.type <numpy.dtype.type>`.
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__',
+ """
+ a.__deepcopy__(memo, /)
+
+ Used if :func:`copy.deepcopy` is called on an array.
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__',
+ """
+ a.__reduce__()
+
+ For pickling.
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__',
+ """
+ a.__setstate__(state, /)
+
+ For unpickling.
+
+ The `state` argument must be a sequence that contains the following
+ elements:
+
+ Parameters
+ ----------
+ version : int
+ optional pickle version. If omitted defaults to 0.
+ shape : tuple
+ dtype : data-type
+ isFortran : bool
+ rawdata : string or list
+ a binary string with the data (or a list if 'a' is an object array)
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('all',
+ """
+ a.all(axis=None, out=None, keepdims=False, *, where=True)
+
+ Returns True if all elements evaluate to True.
+
+ Refer to `numpy.all` for full documentation.
+
+ See Also
+ --------
+ numpy.all : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('any',
+ """
+ a.any(axis=None, out=None, keepdims=False, *, where=True)
+
+ Returns True if any of the elements of `a` evaluate to True.
+
+ Refer to `numpy.any` for full documentation.
+
+ See Also
+ --------
+ numpy.any : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax',
+ """
+ a.argmax(axis=None, out=None, *, keepdims=False)
+
+ Return indices of the maximum values along the given axis.
+
+ Refer to `numpy.argmax` for full documentation.
+
+ See Also
+ --------
+ numpy.argmax : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin',
+ """
+ a.argmin(axis=None, out=None, *, keepdims=False)
+
+ Return indices of the minimum values along the given axis.
+
+ Refer to `numpy.argmin` for detailed documentation.
+
+ See Also
+ --------
+ numpy.argmin : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort',
+ """
+ a.argsort(axis=-1, kind=None, order=None)
+
+ Returns the indices that would sort this array.
+
+ Refer to `numpy.argsort` for full documentation.
+
+ See Also
+ --------
+ numpy.argsort : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition',
+ """
+ a.argpartition(kth, axis=-1, kind='introselect', order=None)
+
+ Returns the indices that would partition this array.
+
+ Refer to `numpy.argpartition` for full documentation.
+
+ See Also
+ --------
+ numpy.argpartition : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('astype',
+ """
+ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
+
+ Copy of the array, cast to a specified type.
+
+ Parameters
+ ----------
+ dtype : str or dtype
+ Typecode or data-type to which the array is cast.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout order of the result.
+ 'C' means C order, 'F' means Fortran order, 'A'
+ means 'F' order if all the arrays are Fortran contiguous,
+ 'C' order otherwise, and 'K' means as close to the
+ order the array elements appear in memory as possible.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'unsafe'
+ for backwards compatibility.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through (default), otherwise
+ the returned array will be forced to be a base-class array.
+ copy : bool, optional
+ By default, astype always returns a newly allocated array. If this
+ is set to false, and the `dtype`, `order`, and `subok`
+ requirements are satisfied, the input array is returned instead
+ of a copy.
+
+ Returns
+ -------
+ arr_t : ndarray
+ Unless `copy` is False and the other conditions for returning the input
+ array are satisfied (see description for `copy` input parameter), `arr_t`
+ is a new array of the same shape as the input array, with dtype, order
+ given by `dtype`, `order`.
+
+ Raises
+ ------
+ ComplexWarning
+ When casting from complex to float or int. To avoid this,
+ one should use ``a.real.astype(t)``.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 2.5])
+ >>> x
+ array([1. , 2. , 2.5])
+
+ >>> x.astype(int)
+ array([1, 2, 2])
+
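+ A stricter ``casting`` rule refuses conversions that may lose
+ information (a small sketch; the exact error text may vary):
+
+ >>> x.astype(int, casting='safe')
+ Traceback (most recent call last):
+     ...
+ TypeError: Cannot cast array data from dtype('float64') to dtype('int64') according to the rule 'safe'
+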
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap',
+ """
+ a.byteswap(inplace=False)
+
+ Swap the bytes of the array elements.
+
+ Toggle between little-endian and big-endian data representation by
+ returning a byteswapped array, optionally swapped in-place.
+ Arrays of byte-strings are not swapped. The real and imaginary
+ parts of a complex number are swapped individually.
+
+ Parameters
+ ----------
+ inplace : bool, optional
+ If ``True``, swap bytes in-place, default is ``False``.
+
+ Returns
+ -------
+ out : ndarray
+ The byteswapped array. If `inplace` is ``True``, this is
+ a view to self.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> A = np.array([1, 256, 8755], dtype=np.int16)
+ >>> list(map(hex, A))
+ ['0x1', '0x100', '0x2233']
+ >>> A.byteswap(inplace=True)
+ array([ 256, 1, 13090], dtype=int16)
+ >>> list(map(hex, A))
+ ['0x100', '0x1', '0x3322']
+
+ Arrays of byte-strings are not swapped
+
+ >>> A = np.array([b'ceg', b'fac'])
+ >>> A.byteswap()
+ array([b'ceg', b'fac'], dtype='|S3')
+
+ ``A.view(A.dtype.newbyteorder()).byteswap()`` produces an array with
+ the same values but different representation in memory
+
+ >>> A = np.array([1, 2, 3],dtype=np.int64)
+ >>> A.view(np.uint8)
+ array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
+ 0, 0], dtype=uint8)
+ >>> A.view(A.dtype.newbyteorder()).byteswap(inplace=True)
+ array([1, 2, 3], dtype='>i8')
+ >>> A.view(np.uint8)
+ array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
+ 0, 3], dtype=uint8)
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('choose',
+ """
+ a.choose(choices, out=None, mode='raise')
+
+ Use an index array to construct a new array from a set of choices.
+
+ Refer to `numpy.choose` for full documentation.
+
+ See Also
+ --------
+ numpy.choose : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('clip',
+ """
+ a.clip(min=None, max=None, out=None, **kwargs)
+
+ Return an array whose values are limited to ``[min, max]``.
+ One of max or min must be given.
+
+ Refer to `numpy.clip` for full documentation.
+
+ See Also
+ --------
+ numpy.clip : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('compress',
+ """
+ a.compress(condition, axis=None, out=None)
+
+ Return selected slices of this array along given axis.
+
+ Refer to `numpy.compress` for full documentation.
+
+ See Also
+ --------
+ numpy.compress : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('conj',
+ """
+ a.conj()
+
+ Complex-conjugate all elements.
+
+ Refer to `numpy.conjugate` for full documentation.
+
+ See Also
+ --------
+ numpy.conjugate : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate',
+ """
+ a.conjugate()
+
+ Return the complex conjugate, element-wise.
+
+ Refer to `numpy.conjugate` for full documentation.
+
+ See Also
+ --------
+ numpy.conjugate : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('copy',
+ """
+ a.copy(order='C')
+
+ Return a copy of the array.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the copy. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible. (Note that this function and :func:`numpy.copy` are very
+ similar but have different default values for their order=
+ arguments, and this function always passes sub-classes through.)
+
+ See also
+ --------
+ numpy.copy : Similar function with different default behavior
+ numpy.copyto
+
+ Notes
+ -----
+ This function is the preferred method for creating an array copy. The
+ function :func:`numpy.copy` is similar, but it defaults to using order 'K',
+ and will not pass sub-classes through by default.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([[1,2,3],[4,5,6]], order='F')
+
+ >>> y = x.copy()
+
+ >>> x.fill(0)
+
+ >>> x
+ array([[0, 0, 0],
+ [0, 0, 0]])
+
+ >>> y
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> y.flags['C_CONTIGUOUS']
+ True
+
+ For arrays containing Python objects (e.g. dtype=object),
+ the copy is a shallow one. The new array will contain the
+ same object which may lead to surprises if that object can
+ be modified (is mutable):
+
+ >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)
+ >>> b = a.copy()
+ >>> b[2][0] = 10
+ >>> a
+ array([1, 'm', list([10, 3, 4])], dtype=object)
+
+ To ensure all elements within an ``object`` array are copied,
+ use `copy.deepcopy`:
+
+ >>> import copy
+ >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)
+ >>> c = copy.deepcopy(a)
+ >>> c[2][0] = 10
+ >>> c
+ array([1, 'm', list([10, 3, 4])], dtype=object)
+ >>> a
+ array([1, 'm', list([2, 3, 4])], dtype=object)
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod',
+ """
+ a.cumprod(axis=None, dtype=None, out=None)
+
+ Return the cumulative product of the elements along the given axis.
+
+ Refer to `numpy.cumprod` for full documentation.
+
+ See Also
+ --------
+ numpy.cumprod : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum',
+ """
+ a.cumsum(axis=None, dtype=None, out=None)
+
+ Return the cumulative sum of the elements along the given axis.
+
+ Refer to `numpy.cumsum` for full documentation.
+
+ See Also
+ --------
+ numpy.cumsum : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal',
+ """
+ a.diagonal(offset=0, axis1=0, axis2=1)
+
+ Return specified diagonals. In NumPy 1.9 the returned array is a
+ read-only view instead of a copy as in previous NumPy versions. In
+ a future version the read-only restriction will be removed.
+
+ Refer to :func:`numpy.diagonal` for full documentation.
+
+ See Also
+ --------
+ numpy.diagonal : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('dot'))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('dump',
+ """
+ a.dump(file)
+
+ Dump a pickle of the array to the specified file.
+ The array can be read back with pickle.load or numpy.load.
+
+ Parameters
+ ----------
+ file : str or Path
+ A string naming the dump file.
+
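+ Examples
+ --------
+ A minimal sketch; ``'a_dump.pkl'`` is just an illustrative filename,
+ and `numpy.load` needs ``allow_pickle=True`` to read it back:
+
+ >>> import numpy as np
+ >>> a = np.arange(3)
+ >>> a.dump('a_dump.pkl')
+ >>> np.load('a_dump.pkl', allow_pickle=True)
+ array([0, 1, 2])
+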
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps',
+ """
+ a.dumps()
+
+ Returns the pickle of the array as a string.
+ pickle.loads will convert the string back to an array.
+
+ Parameters
+ ----------
+ None
+
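+ Examples
+ --------
+ The bytes produced here round-trip through `pickle.loads`:
+
+ >>> import pickle
+ >>> import numpy as np
+ >>> a = np.array([1, 2, 3])
+ >>> pickle.loads(a.dumps())
+ array([1, 2, 3])
+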
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('fill',
+ """
+ a.fill(value)
+
+ Fill the array with a scalar value.
+
+ Parameters
+ ----------
+ value : scalar
+ All elements of `a` will be assigned this value.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1, 2])
+ >>> a.fill(0)
+ >>> a
+ array([0, 0])
+ >>> a = np.empty(2)
+ >>> a.fill(1)
+ >>> a
+ array([1., 1.])
+
+ Fill expects a scalar value and always behaves the same as assigning
+ to a single array element. The following is a rare example where this
+ distinction is important:
+
+ >>> a = np.array([None, None], dtype=object)
+ >>> a[0] = np.array(3)
+ >>> a
+ array([array(3), None], dtype=object)
+ >>> a.fill(np.array(3))
+ >>> a
+ array([array(3), array(3)], dtype=object)
+
+ Where other forms of assignments will unpack the array being assigned:
+
+ >>> a[...] = np.array(3)
+ >>> a
+ array([3, 3], dtype=object)
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten',
+ """
+ a.flatten(order='C')
+
+ Return a copy of the array collapsed into one dimension.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ 'C' means to flatten in row-major (C-style) order.
+ 'F' means to flatten in column-major (Fortran-
+ style) order. 'A' means to flatten in column-major
+ order if `a` is Fortran *contiguous* in memory,
+ row-major order otherwise. 'K' means to flatten
+ `a` in the order the elements occur in memory.
+ The default is 'C'.
+
+ Returns
+ -------
+ y : ndarray
+ A copy of the input array, flattened to one dimension.
+
+ See Also
+ --------
+ ravel : Return a flattened array.
+ flat : A 1-D flat iterator over the array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1,2], [3,4]])
+ >>> a.flatten()
+ array([1, 2, 3, 4])
+ >>> a.flatten('F')
+ array([1, 3, 2, 4])
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield',
+ """
+ a.getfield(dtype, offset=0)
+
+ Returns a field of the given array as a certain type.
+
+ A field is a view of the array data with a given data-type. The values in
+ the view are determined by the given type and the offset into the current
+ array in bytes. The offset needs to be such that the view dtype fits in the
+ array dtype; for example an array of dtype complex128 has 16-byte elements.
+ If taking a view with a 32-bit integer (4 bytes), the offset needs to be
+ between 0 and 12 bytes.
+
+ Parameters
+ ----------
+ dtype : str or dtype
+ The data type of the view. The dtype size of the view can not be larger
+ than that of the array itself.
+ offset : int
+ Number of bytes to skip before beginning the element view.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.diag([1.+1.j]*2)
+ >>> x[1, 1] = 2 + 4.j
+ >>> x
+ array([[1.+1.j, 0.+0.j],
+ [0.+0.j, 2.+4.j]])
+ >>> x.getfield(np.float64)
+ array([[1., 0.],
+ [0., 2.]])
+
+ By choosing an offset of 8 bytes we can select the complex part of the
+ array for our view:
+
+ >>> x.getfield(np.float64, offset=8)
+ array([[1., 0.],
+ [0., 4.]])
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('item',
+ """
+ a.item(*args)
+
+ Copy an element of an array to a standard Python scalar and return it.
+
+ Parameters
+ ----------
+ \\*args : Arguments (variable number and type)
+
+ * none: in this case, the method only works for arrays
+ with one element (`a.size == 1`), which element is
+ copied into a standard Python scalar object and returned.
+
+ * int_type: this argument is interpreted as a flat index into
+ the array, specifying which element to copy and return.
+
+ * tuple of int_types: functions as does a single int_type argument,
+ except that the argument is interpreted as an nd-index into the
+ array.
+
+ Returns
+ -------
+ z : Standard Python scalar object
+ A copy of the specified element of the array as a suitable
+ Python scalar
+
+ Notes
+ -----
+ When the data type of `a` is longdouble or clongdouble, item() returns
+ a scalar array object because there is no available Python scalar that
+ would not lose information. Void arrays return a buffer object for item(),
+ unless fields are defined, in which case a tuple is returned.
+
+ `item` is very similar to a[args], except, instead of an array scalar,
+ a standard Python scalar is returned. This can be useful for speeding up
+ access to elements of the array and doing arithmetic on elements of the
+ array using Python's optimized math.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.random.seed(123)
+ >>> x = np.random.randint(9, size=(3, 3))
+ >>> x
+ array([[2, 2, 6],
+ [1, 3, 6],
+ [1, 0, 1]])
+ >>> x.item(3)
+ 1
+ >>> x.item(7)
+ 0
+ >>> x.item((0, 1))
+ 2
+ >>> x.item((2, 2))
+ 1
+
+ For an array with object dtype, elements are returned as-is.
+
+ >>> a = np.array([np.int64(1)], dtype=object)
+ >>> a.item()  # returns np.int64
+ np.int64(1)
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('max',
+ """
+ a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Return the maximum along a given axis.
+
+ Refer to `numpy.amax` for full documentation.
+
+ See Also
+ --------
+ numpy.amax : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('mean',
+ """
+ a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
+
+ Returns the average of the array elements along given axis.
+
+ Refer to `numpy.mean` for full documentation.
+
+ See Also
+ --------
+ numpy.mean : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('min',
+ """
+ a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Return the minimum along a given axis.
+
+ Refer to `numpy.amin` for full documentation.
+
+ See Also
+ --------
+ numpy.amin : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero',
+ """
+ a.nonzero()
+
+ Return the indices of the elements that are non-zero.
+
+ Refer to `numpy.nonzero` for full documentation.
+
+ See Also
+ --------
+ numpy.nonzero : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('prod',
+ """
+ a.prod(axis=None, dtype=None, out=None, keepdims=False,
+ initial=1, where=True)
+
+ Return the product of the array elements over the given axis
+
+ Refer to `numpy.prod` for full documentation.
+
+ See Also
+ --------
+ numpy.prod : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('put',
+ """
+ a.put(indices, values, mode='raise')
+
+ Set ``a.flat[n] = values[n]`` for all `n` in indices.
+
+ Refer to `numpy.put` for full documentation.
+
+ See Also
+ --------
+ numpy.put : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel',
+ """
+ a.ravel([order])
+
+ Return a flattened array.
+
+ Refer to `numpy.ravel` for full documentation.
+
+ See Also
+ --------
+ numpy.ravel : equivalent function
+
+ ndarray.flat : a flat iterator on the array.
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat',
+ """
+ a.repeat(repeats, axis=None)
+
+ Repeat elements of an array.
+
+ Refer to `numpy.repeat` for full documentation.
+
+ See Also
+ --------
+ numpy.repeat : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape',
+ """
+ a.reshape(shape, /, *, order='C', copy=None)
+
+ Returns an array containing the same data with a new shape.
+
+ Refer to `numpy.reshape` for full documentation.
+
+ See Also
+ --------
+ numpy.reshape : equivalent function
+
+ Notes
+ -----
+ Unlike the free function `numpy.reshape`, this method on `ndarray` allows
+ the elements of the shape parameter to be passed in as separate arguments.
+ For example, ``a.reshape(10, 11)`` is equivalent to
+ ``a.reshape((10, 11))``.
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('resize',
+ """
+ a.resize(new_shape, refcheck=True)
+
+ Change shape and size of array in-place.
+
+ Parameters
+ ----------
+ new_shape : tuple of ints, or `n` ints
+ Shape of resized array.
+ refcheck : bool, optional
+ If False, reference count will not be checked. Default is True.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ ValueError
+ If `a` does not own its own data or references or views to it exist,
+ and the data memory must be changed.
+ PyPy only: will always raise if the data memory must be changed, since
+ there is no reliable way to determine if references or views to it
+ exist.
+
+ SystemError
+ If the `order` keyword argument is specified. This behaviour is a
+ bug in NumPy.
+
+ See Also
+ --------
+ resize : Return a new array with the specified shape.
+
+ Notes
+ -----
+ This reallocates space for the data area if necessary.
+
+ Only contiguous arrays (data elements consecutive in memory) can be
+ resized.
+
+ The purpose of the reference count check is to make sure you
+ do not use this array as a buffer for another Python object and then
+ reallocate the memory. However, reference counts can increase in
+ other ways so if you are sure that you have not shared the memory
+ for this array with another Python object, then you may safely set
+ `refcheck` to False.
+
+ Examples
+ --------
+ Shrinking an array: array is flattened (in the order that the data are
+ stored in memory), resized, and reshaped:
+
+ >>> import numpy as np
+
+ >>> a = np.array([[0, 1], [2, 3]], order='C')
+ >>> a.resize((2, 1))
+ >>> a
+ array([[0],
+ [1]])
+
+ >>> a = np.array([[0, 1], [2, 3]], order='F')
+ >>> a.resize((2, 1))
+ >>> a
+ array([[0],
+ [2]])
+
+ Enlarging an array: as above, but missing entries are filled with zeros:
+
+ >>> b = np.array([[0, 1], [2, 3]])
+ >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
+ >>> b
+ array([[0, 1, 2],
+ [3, 0, 0]])
+
+ Referencing an array prevents resizing...
+
+ >>> c = a
+ >>> a.resize((1, 1))
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot resize an array that references or is referenced ...
+
+ Unless `refcheck` is False:
+
+ >>> a.resize((1, 1), refcheck=False)
+ >>> a
+ array([[0]])
+ >>> c
+ array([[0]])
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('round',
+ """
+ a.round(decimals=0, out=None)
+
+ Return `a` with each element rounded to the given number of decimals.
+
+ Refer to `numpy.around` for full documentation.
+
+ See Also
+ --------
+ numpy.around : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted',
+ """
+ a.searchsorted(v, side='left', sorter=None)
+
+ Find indices where elements of v should be inserted in a to maintain order.
+
+ For full documentation, see `numpy.searchsorted`
+
+ See Also
+ --------
+ numpy.searchsorted : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield',
+ """
+ a.setfield(val, dtype, offset=0)
+
+ Put a value into a specified place in a field defined by a data-type.
+
+ Place `val` into `a`'s field defined by `dtype` and beginning `offset`
+ bytes into the field.
+
+ Parameters
+ ----------
+ val : object
+ Value to be placed in field.
+ dtype : dtype object
+ Data-type of the field in which to place `val`.
+ offset : int, optional
+ The number of bytes into the field at which to place `val`.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ getfield
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.eye(3)
+ >>> x.getfield(np.float64)
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+ >>> x.setfield(3, np.int32)
+ >>> x.getfield(np.int32)
+ array([[3, 3, 3],
+ [3, 3, 3],
+ [3, 3, 3]], dtype=int32)
+ >>> x
+ array([[1.0e+000, 1.5e-323, 1.5e-323],
+ [1.5e-323, 1.0e+000, 1.5e-323],
+ [1.5e-323, 1.5e-323, 1.0e+000]])
+ >>> x.setfield(np.eye(3), np.int32)
+ >>> x
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags',
+ """
+ a.setflags(write=None, align=None, uic=None)
+
+ Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY,
+ respectively.
+
+ These Boolean-valued flags affect how numpy interprets the memory
+ area used by `a` (see Notes below). The ALIGNED flag can only
+ be set to True if the data is actually aligned according to the type.
+ The WRITEBACKIFCOPY flag can never be set
+ to True. The flag WRITEABLE can only be set to True if the array owns its
+ own memory, or the ultimate owner of the memory exposes a writeable buffer
+ interface, or is a string. (The exception for string is made so that
+ unpickling can be done without copying memory.)
+
+ Parameters
+ ----------
+ write : bool, optional
+ Describes whether or not `a` can be written to.
+ align : bool, optional
+ Describes whether or not `a` is aligned properly for its type.
+ uic : bool, optional
+ Describes whether or not `a` is a copy of another "base" array.
+
+ Notes
+ -----
+ Array flags provide information about how the memory area used
+ for the array is to be interpreted. There are 7 Boolean flags
+ in use, only three of which can be changed by the user:
+ WRITEBACKIFCOPY, WRITEABLE, and ALIGNED.
+
+ WRITEABLE (W) the data area can be written to;
+
+ ALIGNED (A) the data and strides are aligned appropriately for the hardware
+ (as determined by the compiler);
+
+ WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
+ by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
+ called, the base array will be updated with the contents of this array.
+
+ All flags can be accessed using the single (upper case) letter as well
+ as the full name.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> y = np.array([[3, 1, 7],
+ ... [2, 0, 0],
+ ... [8, 5, 9]])
+ >>> y
+ array([[3, 1, 7],
+ [2, 0, 0],
+ [8, 5, 9]])
+ >>> y.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+ >>> y.setflags(write=0, align=0)
+ >>> y.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : True
+ WRITEABLE : False
+ ALIGNED : False
+ WRITEBACKIFCOPY : False
+ >>> y.setflags(uic=1)
+ Traceback (most recent call last):
+ File "", line 1, in
+ ValueError: cannot set WRITEBACKIFCOPY flag to True
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('sort',
+ """
+ a.sort(axis=-1, kind=None, order=None)
+
+ Sort an array in-place. Refer to `numpy.sort` for full documentation.
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis along which to sort. Default is -1, which means sort along the
+ last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort under the covers and, in general, the
+ actual implementation will vary with datatype. The 'mergesort' option
+ is retained for backwards compatibility.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ See Also
+ --------
+ numpy.sort : Return a sorted copy of an array.
+ numpy.argsort : Indirect sort.
+ numpy.lexsort : Indirect stable sort on multiple keys.
+ numpy.searchsorted : Find elements in sorted array.
+ numpy.partition: Partial sort.
+
+ Notes
+ -----
+ See `numpy.sort` for notes on the different sorting algorithms.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1,4], [3,1]])
+ >>> a.sort(axis=1)
+ >>> a
+ array([[1, 4],
+ [1, 3]])
+ >>> a.sort(axis=0)
+ >>> a
+ array([[1, 3],
+ [1, 4]])
+
+ Use the `order` keyword to specify a field to use when sorting a
+ structured array:
+
+ >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
+ >>> a.sort(order='y')
+ >>> a
+ array([(b'c', 1), (b'a', 2)],
+ dtype=[('x', 'S1'), ('y', '<i8')])
+
+ """))
+
+
+ add_newdoc('numpy._core.multiarray', 'ndarray', ('partition',
+ """
+ a.partition(kth, axis=-1, kind='introselect', order=None)
+
+ Partially sorts the elements in the array in such a way that the value
+ of the element in k-th position is in the position it would be in a
+ sorted array. All elements smaller than the k-th element are moved
+ before this element and all equal or greater are moved behind it. The
+ ordering of the elements in the two partitions is undefined.
+
+ Refer to `numpy.partition` for full documentation.
+
+ See Also
+ --------
+ numpy.partition : Return a partitioned copy of an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([3, 4, 2, 1])
+ >>> a.partition(3)
+ >>> a
+ array([2, 1, 3, 4]) # may vary
+
+ >>> a.partition((1, 3))
+ >>> a
+ array([1, 2, 3, 4])
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze',
+ """
+ a.squeeze(axis=None)
+
+ Remove axes of length one from `a`.
+
+ Refer to `numpy.squeeze` for full documentation.
+
+ See Also
+ --------
+ numpy.squeeze : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('std',
+ """
+ a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+ Returns the standard deviation of the array elements along given axis.
+
+ Refer to `numpy.std` for full documentation.
+
+ See Also
+ --------
+ numpy.std : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('sum',
+ """
+ a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
+
+ Return the sum of the array elements over the given axis.
+
+ Refer to `numpy.sum` for full documentation.
+
+ See Also
+ --------
+ numpy.sum : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes',
+ """
+ a.swapaxes(axis1, axis2)
+
+ Return a view of the array with `axis1` and `axis2` interchanged.
+
+ Refer to `numpy.swapaxes` for full documentation.
+
+ See Also
+ --------
+ numpy.swapaxes : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('take',
+ """
+ a.take(indices, axis=None, out=None, mode='raise')
+
+ Return an array formed from the elements of `a` at the given indices.
+
+ Refer to `numpy.take` for full documentation.
+
+ See Also
+ --------
+ numpy.take : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile',
+ """
+ a.tofile(fid, sep="", format="%s")
+
+ Write array to a file as text or binary (default).
+
+ Data is always written in 'C' order, independent of the order of `a`.
+ The data produced by this method can be recovered using the function
+ fromfile().
+
+ Parameters
+ ----------
+ fid : file or str or Path
+ An open file object, or a string containing a filename.
+ sep : str
+ Separator between array items for text output.
+ If "" (empty), a binary file is written, equivalent to
+ ``file.write(a.tobytes())``.
+ format : str
+ Format string for text file output.
+ Each entry in the array is formatted to text by first converting
+ it to the closest Python type, and then using "format" % item.
+
+ Notes
+ -----
+ This is a convenience function for quick storage of array data.
+ Information on endianness and precision is lost, so this method is not a
+ good choice for files intended to archive data or transport data between
+ machines with different endianness. Some of these problems can be overcome
+ by outputting the data as text files, at the expense of speed and file
+ size.
+
+ When fid is a file object, array contents are directly written to the
+ file, bypassing the file object's ``write`` method. As a result, tofile
+ cannot be used with file objects supporting compression (e.g., GzipFile)
+ or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+
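+ Examples
+ --------
+ A small text round-trip sketch (``'data.csv'`` is an illustrative
+ filename):
+
+ >>> import numpy as np
+ >>> a = np.arange(4, dtype=np.int32)
+ >>> a.tofile('data.csv', sep=',')
+ >>> np.fromfile('data.csv', dtype=np.int32, sep=',')
+ array([0, 1, 2, 3], dtype=int32)
+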
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist',
+ """
+ a.tolist()
+
+ Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
+
+ Return a copy of the array data as a (nested) Python list.
+ Data items are converted to the nearest compatible builtin Python type, via
+ the `~numpy.ndarray.item` function.
+
+ If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
+ not be a list at all, but a simple Python scalar.
+
+ Parameters
+ ----------
+ none
+
+ Returns
+ -------
+ y : object, or list of object, or list of list of object, or ...
+ The possibly nested list of array elements.
+
+ Notes
+ -----
+ The array may be recreated via ``a = np.array(a.tolist())``, although this
+ may sometimes lose precision.
+
+ Examples
+ --------
+ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+ except that ``tolist`` changes numpy scalars to Python scalars:
+
+ >>> import numpy as np
+ >>> a = np.uint32([1, 2])
+ >>> a_list = list(a)
+ >>> a_list
+ [np.uint32(1), np.uint32(2)]
+ >>> type(a_list[0])
+ <class 'numpy.uint32'>
+ >>> a_tolist = a.tolist()
+ >>> a_tolist
+ [1, 2]
+ >>> type(a_tolist[0])
+ <class 'int'>
+
+ Additionally, for a 2D array, ``tolist`` applies recursively:
+
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> list(a)
+ [array([1, 2]), array([3, 4])]
+ >>> a.tolist()
+ [[1, 2], [3, 4]]
+
+ The base case for this recursion is a 0D array:
+
+ >>> a = np.array(1)
+ >>> list(a)
+ Traceback (most recent call last):
+ ...
+ TypeError: iteration over a 0-d array
+ >>> a.tolist()
+ 1
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', """
+ a.tobytes(order='C')
+
+ Construct Python bytes containing the raw data bytes in the array.
+
+ Constructs Python bytes showing a copy of the raw contents of
+ data memory. The bytes object is produced in C-order by default.
+ This behavior is controlled by the ``order`` parameter.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A'}, optional
+ Controls the memory layout of the bytes object. 'C' means C-order,
+ 'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
+ Fortran contiguous, 'C' otherwise. Default is 'C'.
+
+ Returns
+ -------
+ s : bytes
+ Python bytes exhibiting a copy of `a`'s raw data.
+
+ See also
+ --------
+ frombuffer
+ Inverse of this operation, construct a 1-dimensional array from Python
+ bytes.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
+ >>> x.tobytes()
+ b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
+ >>> x.tobytes('C') == x.tobytes()
+ True
+ >>> x.tobytes('F')
+ b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('trace',
+ """
+ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+
+ Return the sum along diagonals of the array.
+
+ Refer to `numpy.trace` for full documentation.
+
+ See Also
+ --------
+ numpy.trace : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose',
+ """
+ a.transpose(*axes)
+
+ Returns a view of the array with axes transposed.
+
+ Refer to `numpy.transpose` for full documentation.
+
+ Parameters
+ ----------
+ axes : None, tuple of ints, or `n` ints
+
+ * None or no argument: reverses the order of the axes.
+
+ * tuple of ints: `i` in the `j`-th place in the tuple means that the
+ array's `i`-th axis becomes the transposed array's `j`-th axis.
+
+ * `n` ints: same as an n-tuple of the same ints (this form is
+ intended simply as a "convenience" alternative to the tuple form).
+
+ Returns
+ -------
+ p : ndarray
+ View of the array with its axes suitably permuted.
+
+ See Also
+ --------
+ transpose : Equivalent function.
+ ndarray.T : Array property returning the array transposed.
+ ndarray.reshape : Give a new shape to an array without changing its data.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> a.transpose()
+ array([[1, 3],
+ [2, 4]])
+ >>> a.transpose((1, 0))
+ array([[1, 3],
+ [2, 4]])
+ >>> a.transpose(1, 0)
+ array([[1, 3],
+ [2, 4]])
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> a
+ array([1, 2, 3, 4])
+ >>> a.transpose()
+ array([1, 2, 3, 4])
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('var',
+ """
+ a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+ Returns the variance of the array elements, along given axis.
+
+ Refer to `numpy.var` for full documentation.
+
+ See Also
+ --------
+ numpy.var : equivalent function
+
+ """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('view',
+ """
+ a.view([dtype][, type])
+
+ New view of array with the same data.
+
+ .. note::
+ Passing None for ``dtype`` is different from omitting the parameter,
+ since the former invokes ``dtype(None)`` which is an alias for
+ ``dtype('float64')``.
+
+ Parameters
+ ----------
+ dtype : data-type or ndarray sub-class, optional
+ Data-type descriptor of the returned view, e.g., float32 or int16.
+ Omitting it results in the view having the same data-type as `a`.
+ This argument can also be specified as an ndarray sub-class, which
+ then specifies the type of the returned object (this is equivalent to
+ setting the ``type`` parameter).
+ type : Python type, optional
+ Type of the returned view, e.g., ndarray or matrix. Again, omission
+ of the parameter results in type preservation.
+
+ Notes
+ -----
+ ``a.view()`` is used two different ways:
+
+ ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+ of the array's memory with a different data-type. This can cause a
+ reinterpretation of the bytes of memory.
+
+ ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+ returns an instance of `ndarray_subclass` that looks at the same array
+ (same shape, dtype, etc.). This does not cause a reinterpretation of the
+ memory.
+
+ For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+ bytes per entry than the previous dtype (for example, converting a regular
+ array to a structured array), then the last axis of ``a`` must be
+ contiguous. This axis will be resized in the result.
+
+ .. versionchanged:: 1.23.0
+ Only the last axis needs to be contiguous. Previously, the entire array
+ had to be C-contiguous.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([(-1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+ Viewing array data using a different type and dtype:
+
+ >>> nonneg = np.dtype([("a", np.uint8), ("b", np.uint8)])
+ >>> y = x.view(dtype=nonneg, type=np.recarray)
+ >>> x["a"]
+ array([-1], dtype=int8)
+ >>> y.a
+ array([255], dtype=uint8)
+
+ Creating a view on a structured array so it can be used in calculations
+
+ >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+ >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+ >>> xv
+ array([[1, 2],
+ [3, 4]], dtype=int8)
+ >>> xv.mean(0)
+ array([2., 3.])
+
+ Making changes to the view changes the underlying array
+
+ >>> xv[0,1] = 20
+ >>> x
+ array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
+
+ Using a view to convert an array to a recarray:
+
+ >>> z = x.view(np.recarray)
+ >>> z.a
+ array([1, 3], dtype=int8)
+
+ Views share data:
+
+ >>> x[0] = (9, 10)
+ >>> z[0]
+ np.record((9, 10), dtype=[('a', 'i1'), ('b', 'i1')])
+
+ Views that change the dtype size (bytes per entry) should normally be
+ avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+ >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16)
+ >>> y = x[:, ::2]
+ >>> y
+ array([[1, 3],
+ [4, 6]], dtype=int16)
+ >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+ Traceback (most recent call last):
+ ...
+ ValueError: To change to a dtype of a different size, the last axis must be contiguous
+ >>> z = y.copy()
+ >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+ array([[(1, 3)],
+ [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+ However, views that change dtype are totally fine for arrays with a
+ contiguous last axis, even if the rest of the axes are not C-contiguous:
+
+ >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4)
+ >>> x.transpose(1, 0, 2).view(np.int16)
+ array([[[ 256, 770],
+ [3340, 3854]],
+
+ [[1284, 1798],
+ [4368, 4882]],
+
+ [[2312, 2826],
+ [5396, 5910]]], dtype=int16)
+
+ """))
+
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy._core.umath', 'frompyfunc',
+ """
+ frompyfunc(func, /, nin, nout, *[, identity])
+
+ Takes an arbitrary Python function and returns a NumPy ufunc.
+
+ Can be used, for example, to add broadcasting to a built-in Python
+ function (see Examples section).
+
+ Parameters
+ ----------
+ func : Python function object
+ An arbitrary Python function.
+ nin : int
+ The number of input arguments.
+ nout : int
+ The number of objects returned by `func`.
+ identity : object, optional
+ The value to use for the `~numpy.ufunc.identity` attribute of the resulting
+ object. If specified, this is equivalent to setting the underlying
+ C ``identity`` field to ``PyUFunc_IdentityValue``.
+ If omitted, the identity is set to ``PyUFunc_None``. Note that this is
+ _not_ equivalent to setting the identity to ``None``, which implies the
+ operation is reorderable.
+
+ Returns
+ -------
+ out : ufunc
+ Returns a NumPy universal function (``ufunc``) object.
+
+ See Also
+ --------
+ vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
+
+ Notes
+ -----
+ The returned ufunc always returns PyObject arrays.
+
+ Examples
+ --------
+ Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+ >>> import numpy as np
+ >>> oct_array = np.frompyfunc(oct, 1, 1)
+ >>> oct_array(np.array((10, 30, 100)))
+ array(['0o12', '0o36', '0o144'], dtype=object)
+ >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+ array(['0o12', '0o36', '0o144'], dtype='<U5')
+
+ """)
+
+ add_newdoc('numpy._core.umath', '_add_newdoc_ufunc',
+ """
+ add_ufunc_docstring(ufunc, new_docstring)
+
+ Replace the docstring for a ufunc with new_docstring.
+ This method will only work if the current docstring for
+ the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+ Parameters
+ ----------
+ ufunc : numpy.ufunc
+ A ufunc whose current doc is NULL.
+ new_docstring : string
+ The new docstring for the ufunc.
+
+ Notes
+ -----
+ This method allocates memory for new_docstring on
+ the heap. Technically this creates a memory leak, since this
+ memory will not be reclaimed until the end of the program
+ even if the ufunc itself is removed. However this will only
+ be a problem if the user is repeatedly creating ufuncs with
+ no documentation, adding documentation via add_newdoc_ufunc,
+ and then throwing away the ufunc.
+ """)
+
+add_newdoc('numpy._core.multiarray', 'get_handler_name',
+ """
+ get_handler_name(a: ndarray) -> str,None
+
+ Return the name of the memory handler used by `a`. If not provided, return
+ the name of the memory handler that will be used to allocate data for the
+ next `ndarray` in this context. May return None if `a` does not own its
+ memory, in which case you can traverse ``a.base`` for a memory handler.
+ """)
+
+add_newdoc('numpy._core.multiarray', 'get_handler_version',
+ """
+ get_handler_version(a: ndarray) -> int,None
+
+ Return the version of the memory handler used by `a`. If not provided,
+ return the version of the memory handler that will be used to allocate data
+ for the next `ndarray` in this context. May return None if `a` does not own
+ its memory, in which case you can traverse ``a.base`` for a memory handler.
+ """)
+
+add_newdoc('numpy._core._multiarray_umath', '_array_converter',
+ """
+ _array_converter(*array_likes)
+
+ Helper to convert one or more objects to arrays. Integrates machinery
+ to deal with the ``result_type`` and ``__array_wrap__``.
+
+ The reason for this is that e.g. ``result_type`` needs to convert to arrays
+ to find the ``dtype``. But converting to an array before calling
+ ``result_type`` would incorrectly "forget" whether it was a Python int,
+ float, or complex.
+ """)
+
+add_newdoc(
+ 'numpy._core._multiarray_umath', '_array_converter', ('scalar_input',
+ """
+ A tuple which indicates for each input whether it was a scalar that
+ was coerced to a 0-D array (and was not already an array or something
+ converted via a protocol like ``__array__()``).
+ """))
+
+add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('as_arrays',
+ """
+ as_arrays(/, subok=True, pyscalars="convert_if_no_array")
+
+ Return the inputs as arrays or scalars.
+
+ Parameters
+ ----------
+ subok : True or False, optional
+ Whether array subclasses are preserved.
+ pyscalars : {"convert", "preserve", "convert_if_no_array"}, optional
+ To allow NEP 50 weak promotion later, it may be desirable to preserve
+ Python scalars. By default, these are preserved unless all inputs
+ are Python scalars. "convert" enforces an array return.
+ """))
+
+add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('result_type',
+ """result_type(/, extra_dtype=None, ensure_inexact=False)
+
+ Find the ``result_type`` just as ``np.result_type`` would, but taking
+ into account that the original inputs (before converting to an array) may
+ have been Python scalars with weak promotion.
+
+ Parameters
+ ----------
+ extra_dtype : dtype instance or class
+ An additional DType or dtype instance to promote (e.g. could be used
+ to ensure the result precision is at least float32).
+ ensure_inexact : True or False
+ When ``True``, ensures a floating point (or complex) result replacing
+ the ``arr * 1.`` or ``result_type(..., 0.0)`` pattern.
+ """))
+
+add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('wrap',
+ """
+ wrap(arr, /, to_scalar=None)
+
+ Call ``__array_wrap__`` on ``arr`` if ``arr`` is not the same subclass
+ as the input the ``__array_wrap__`` method was retrieved from.
+
+ Parameters
+ ----------
+ arr : ndarray
+ The object to be wrapped. Normally an ndarray or subclass,
+ although for backward compatibility NumPy scalars are also accepted
+ (these will be converted to a NumPy array before being passed on to
+ the ``__array_wrap__`` method).
+ to_scalar : {True, False, None}, optional
+ When ``True`` will convert a 0-d array to a scalar via ``result[()]``
+ (with a fast-path for non-subclasses). If ``False`` the result should
+ be an array-like (as ``__array_wrap__`` is free to return a non-array).
+ By default (``None``), a scalar is returned if all inputs were scalar.
+ """))
+
+
+add_newdoc('numpy._core.multiarray', '_get_madvise_hugepage',
+ """
+ _get_madvise_hugepage() -> bool
+
+ Get use of ``madvise (2)`` MADV_HUGEPAGE support when
+ allocating the array data. Returns the currently set value.
+ See `global_state` for more information.
+ """)
+
+add_newdoc('numpy._core.multiarray', '_set_madvise_hugepage',
+ """
+ _set_madvise_hugepage(enabled: bool) -> bool
+
+ Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when
+ allocating the array data. Returns the previously set value.
+ See `global_state` for more information.
+ """)
+
+
+##############################################################################
+#
+# Documentation for ufunc attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ufunc object
+#
+##############################################################################
+
+add_newdoc('numpy._core', 'ufunc',
+ """
+ Functions that operate element by element on whole arrays.
+
+ To see the documentation for a specific ufunc, use `info`. For
+ example, ``np.info(np.sin)``. Because ufuncs are written in C
+ (for speed) and linked into Python with NumPy's ufunc facility,
+ Python's help() function finds this page whenever help() is called
+ on a ufunc.
+
+ A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
+
+ **Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)``
+
+ Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
+
+ The broadcasting rules are:
+
+ * Dimensions of length 1 may be prepended to either array.
+ * Arrays may be repeated along dimensions of length 1.
+
+ Parameters
+ ----------
+ *x : array_like
+ Input arrays.
+ out : ndarray, None, ..., or tuple of ndarray and None, optional
+ Location(s) into which the result(s) are stored.
+ If not provided or None, new array(s) are created by the ufunc.
+ If passed as a keyword argument, can be Ellipsis (``out=...``) to
+ ensure an array is returned even if the result is 0-dimensional,
+ or a tuple with length equal to the number of outputs (where None
+ can be used for allocation by the ufunc).
+
+ .. versionadded:: 2.3
+ Support for ``out=...`` was added.
+
+ where : array_like, optional
+ This condition is broadcast over the input. At locations where the
+ condition is True, the `out` array will be set to the ufunc result.
+ Elsewhere, the `out` array will retain its original value.
+ Note that if an uninitialized `out` array is created via the default
+ ``out=None``, locations within it where the condition is False will
+ remain uninitialized.
+ **kwargs
+ For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
+
+ Returns
+ -------
+ r : ndarray or tuple of ndarray
+ `r` will have the shape that the arrays in `x` broadcast to; if `out` is
+ provided, it will be returned. If not, `r` will be allocated and
+ may contain uninitialized values. If the function has more than one
+ output, then the result will be a tuple of arrays.
+
+ """)
+
+
+##############################################################################
+#
+# ufunc attributes
+#
+##############################################################################
+
+add_newdoc('numpy._core', 'ufunc', ('identity',
+ """
+ The identity value.
+
+ Data attribute containing the identity element for the ufunc,
+ if it has one. If it does not, the attribute value is None.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.add.identity
+ 0
+ >>> np.multiply.identity
+ 1
+ >>> print(np.power.identity)
+ None
+ >>> print(np.exp.identity)
+ None
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('nargs',
+ """
+ The number of arguments.
+
+ Data attribute containing the number of arguments the ufunc takes, including
+ optional ones.
+
+ Notes
+ -----
+ Typically this value will be one more than what you might expect
+ because all ufuncs take the optional "out" argument.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.add.nargs
+ 3
+ >>> np.multiply.nargs
+ 3
+ >>> np.power.nargs
+ 3
+ >>> np.exp.nargs
+ 2
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('nin',
+ """
+ The number of inputs.
+
+ Data attribute containing the number of arguments the ufunc treats as input.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.add.nin
+ 2
+ >>> np.multiply.nin
+ 2
+ >>> np.power.nin
+ 2
+ >>> np.exp.nin
+ 1
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('nout',
+ """
+ The number of outputs.
+
+ Data attribute containing the number of arguments the ufunc treats as output.
+
+ Notes
+ -----
+ Since all ufuncs can take output arguments, this will always be at least 1.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.add.nout
+ 1
+ >>> np.multiply.nout
+ 1
+ >>> np.power.nout
+ 1
+ >>> np.exp.nout
+ 1
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('ntypes',
+ """
+ The number of types.
+
+ The number of numerical NumPy types on which the ufunc can operate.
+
+ See Also
+ --------
+ numpy.ufunc.types
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.add.ntypes
+ 22
+ >>> np.multiply.ntypes
+ 23
+ >>> np.power.ntypes
+ 21
+ >>> np.exp.ntypes
+ 10
+ >>> np.remainder.ntypes
+ 16
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('types',
+ """
+ Returns a list with types grouped input->output.
+
+ Data attribute listing the data-type "Domain-Range" groupings the ufunc can
+ deliver. The data-types are given using the character codes.
+
+ See Also
+ --------
+ numpy.ufunc.ntypes
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.add.types
+ ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', ...
+
+ >>> np.power.types
+ ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ...
+
+ >>> np.exp.types
+ ['e->e', 'f->f', 'd->d', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
+
+ >>> np.remainder.types
+ ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ...
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('signature',
+ """
+ Definition of the core elements a generalized ufunc operates on.
+
+ The signature determines how the dimensions of each input/output array
+ are split into core and loop dimensions:
+
+ 1. Each dimension in the signature is matched to a dimension of the
+ corresponding passed-in array, starting from the end of the shape tuple.
+ 2. Core dimensions assigned to the same label in the signature must have
+ exactly matching sizes; no broadcasting is performed.
+ 3. The core dimensions are removed from all inputs and the remaining
+ dimensions are broadcast together, defining the loop dimensions.
+
+ Notes
+ -----
+ Generalized ufuncs are used internally in many linalg functions, and in
+ the testing suite; the examples below are taken from these.
+ For ufuncs that operate on scalars, the signature is None, which is
+ equivalent to '()' for every argument.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.linalg._umath_linalg.det.signature
+ '(m,m)->()'
+ >>> np.matmul.signature
+ '(n?,k),(k,m?)->(n?,m?)'
+ >>> np.add.signature is None
+ True # equivalent to '(),()->()'
+ """))
+
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
+add_newdoc('numpy._core', 'ufunc', ('reduce',
+ """
+ reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Reduces `array`'s dimension by one, by applying ufunc along one axis.
+
+ Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
+ :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+ the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
+ ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+ For a one-dimensional array, reduce produces results equivalent to:
+ ::
+
+ r = op.identity # op = ufunc
+ for i in range(len(A)):
+ r = op(r, A[i])
+ return r
+
+ For example, add.reduce() is equivalent to sum().
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a reduction is performed.
+ The default (``axis = 0``) is to perform a reduction over the first
+ dimension of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ If this is None, a reduction is performed over all the axes.
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+
+ For operations which are either not commutative or not associative,
+ doing a reduction over multiple axes is not well-defined. The
+ ufuncs do not currently raise an exception in this case, but will
+ likely do so in the future.
+ dtype : data-type code, optional
+ The data type used to perform the operation. Defaults to that of
+ ``out`` if given, and the data type of ``array`` otherwise (though
+ upcast to conserve precision for some cases, such as
+ ``numpy.add.reduce`` for integer or boolean input).
+ out : ndarray, None, ..., or tuple of ndarray and None, optional
+ Location into which the result is stored.
+ If not provided or None, a freshly-allocated array is returned.
+ If passed as a keyword argument, can be Ellipsis (``out=...``) to
+ ensure an array is returned even if the result is 0-dimensional
+ (which is useful especially for object dtype), or a 1-element tuple
+ (latter for consistency with ``ufunc.__call__``).
+
+ .. versionadded:: 2.3
+ Support for ``out=...`` was added.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `array`.
+ initial : scalar, optional
+ The value with which to start the reduction.
+ If the ufunc has no identity or the dtype is object, this defaults
+ to None - otherwise it defaults to ufunc.identity.
+ If ``None`` is given, the first element of the reduction is used,
+ and an error is thrown if the reduction is empty.
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `array`, and selects elements to include in the reduction. Note
+ that for ufuncs like ``minimum`` that do not have an identity
+ defined, one has to pass in also ``initial``.
+
+ Returns
+ -------
+ r : ndarray
+ The reduced array. If `out` was supplied, `r` is a reference to it.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.multiply.reduce([2,3,5])
+ 30
+
+ A multi-dimensional array example:
+
+ >>> X = np.arange(8).reshape((2,2,2))
+ >>> X
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> np.add.reduce(X, 0)
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X) # confirm: default axis value is 0
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X, 1)
+ array([[ 2, 4],
+ [10, 12]])
+ >>> np.add.reduce(X, 2)
+ array([[ 1, 5],
+ [ 9, 13]])
+
+ You can use the ``initial`` keyword argument to initialize the reduction
+ with a different value, and ``where`` to select specific elements to include:
+
+ >>> np.add.reduce([10], initial=5)
+ 15
+ >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+ array([14., 14.])
+ >>> a = np.array([10., np.nan, 10])
+ >>> np.add.reduce(a, where=~np.isnan(a))
+ 20.0
+
+ Allows reductions of empty arrays where they would normally fail, i.e.
+ for ufuncs without an identity.
+
+ >>> np.minimum.reduce([], initial=np.inf)
+ inf
+ >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
+ array([ 1., 10.])
+ >>> np.minimum.reduce([])
+ Traceback (most recent call last):
+ ...
+ ValueError: zero-size array to reduction operation minimum which has no identity
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('accumulate',
+ """
+ accumulate(array, axis=0, dtype=None, out=None)
+
+ Accumulate the result of applying the operator to all elements.
+
+ For a one-dimensional array, accumulate produces results equivalent to::
+
+ r = np.empty(len(A))
+ t = op.identity # op = the ufunc being applied to A's elements
+ for i in range(len(A)):
+ t = op(t, A[i])
+ r[i] = t
+ return r
+
+ For example, add.accumulate() is equivalent to np.cumsum().
+
+ For a multi-dimensional array, accumulate is applied along only one
+ axis (axis zero by default; see Examples below) so repeated use is
+ necessary if one wants to accumulate over multiple axes.
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ axis : int, optional
+ The axis along which to apply the accumulation; default is zero.
+ dtype : data-type code, optional
+ The data-type used to represent the intermediate results. Defaults
+ to the data-type of the output array if such is provided, or the
+ data-type of the input array if no output array is provided.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ Location into which the result is stored.
+ If not provided or None, a freshly-allocated array is returned.
+ For consistency with ``ufunc.__call__``, if passed as a keyword
+ argument, can be Ellipsis (``out=...``, which has the same effect
+ as None, since an array is always returned), or a 1-element tuple.
+
+ Returns
+ -------
+ r : ndarray
+ The accumulated values. If `out` was supplied, `r` is a reference to
+ `out`.
+
+ Examples
+ --------
+ 1-D array examples:
+
+ >>> import numpy as np
+ >>> np.add.accumulate([2, 3, 5])
+ array([ 2, 5, 10])
+ >>> np.multiply.accumulate([2, 3, 5])
+ array([ 2, 6, 30])
+
+ 2-D array examples:
+
+ >>> I = np.eye(2)
+ >>> I
+ array([[1., 0.],
+ [0., 1.]])
+
+ Accumulate along axis 0 (rows), down columns:
+
+ >>> np.add.accumulate(I, 0)
+ array([[1., 0.],
+ [1., 1.]])
+ >>> np.add.accumulate(I) # no axis specified = axis zero
+ array([[1., 0.],
+ [1., 1.]])
+
+ Accumulate along axis 1 (columns), through rows:
+
+ >>> np.add.accumulate(I, 1)
+ array([[1., 1.],
+ [0., 1.]])
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('reduceat',
+ """
+ reduceat(array, indices, axis=0, dtype=None, out=None)
+
+ Performs a (local) reduce with specified slices over a single axis.
+
+ For i in ``range(len(indices))``, `reduceat` computes
+ ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
+ generalized "row" parallel to `axis` in the final result (i.e., in a
+ 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
+ `axis = 1`, it becomes the i-th column). There are three exceptions to this:
+
+ * when ``i = len(indices) - 1`` (so for the last index),
+ ``indices[i+1] = array.shape[axis]``.
+ * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
+ simply ``array[indices[i]]``.
+ * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
+
+ The shape of the output depends on the size of `indices`, and may be
+ larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ indices : array_like
+ Paired indices, comma separated (not colon), specifying slices to
+ reduce.
+ axis : int, optional
+ The axis along which to apply the reduceat.
+ dtype : data-type code, optional
+ The data type used to perform the operation. Defaults to that of
+ ``out`` if given, and the data type of ``array`` otherwise (though
+ upcast to conserve precision for some cases, such as
+ ``numpy.add.reduce`` for integer or boolean input).
+ out : ndarray, None, or tuple of ndarray and None, optional
+ Location into which the result is stored.
+ If not provided or None, a freshly-allocated array is returned.
+ For consistency with ``ufunc.__call__``, if passed as a keyword
+ argument, can be Ellipsis (``out=...``, which has the same effect
+ as None, since an array is always returned), or a 1-element tuple.
+
+ Returns
+ -------
+ r : ndarray
+ The reduced values. If `out` was supplied, `r` is a reference to
+ `out`.
+
+ Notes
+ -----
+ A descriptive example:
+
+ If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
+ ``ufunc.reduceat(array, indices)[::2]`` where `indices` is
+ ``range(len(array) - 1)`` with a zero placed
+ in every other element:
+ ``indices = zeros(2 * len(array) - 1)``,
+ ``indices[1::2] = range(1, len(array))``.
+
+ Don't be fooled by this attribute's name: `reduceat(array)` is not
+ necessarily smaller than `array`.
+
+ Examples
+ --------
+ To take the running sum of four successive values:
+
+ >>> import numpy as np
+ >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
+ array([ 6, 10, 14, 18])
+
+ A 2-D example:
+
+ >>> x = np.linspace(0, 15, 16).reshape(4,4)
+ >>> x
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
+
+ ::
+
+ # reduce such that the result has the following five rows:
+ # [row1 + row2 + row3]
+ # [row4]
+ # [row2]
+ # [row3]
+ # [row1 + row2 + row3 + row4]
+
+ >>> np.add.reduceat(x, [0, 3, 1, 2, 0])
+ array([[12., 15., 18., 21.],
+ [12., 13., 14., 15.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [24., 28., 32., 36.]])
+
+ ::
+
+ # reduce such that result has the following two columns:
+ # [col1 * col2 * col3, col4]
+
+ >>> np.multiply.reduceat(x, [0, 3], 1)
+ array([[ 0., 3.],
+ [ 120., 7.],
+ [ 720., 11.],
+ [2184., 15.]])
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('outer',
+ r"""
+ outer(A, B, /, **kwargs)
+
+ Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
+
+ Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
+ ``op.outer(A, B)`` is an array of dimension M + N such that:
+
+ .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
+ op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
+
+ For `A` and `B` one-dimensional, this is equivalent to::
+
+ r = empty((len(A), len(B)))
+ for i in range(len(A)):
+ for j in range(len(B)):
+ r[i,j] = op(A[i], B[j]) # op = ufunc in question
+
+ Parameters
+ ----------
+ A : array_like
+ First array
+ B : array_like
+ Second array
+ kwargs : any
+ Arguments to pass on to the ufunc. Typically `dtype` or `out`.
+ See `ufunc` for a comprehensive overview of all available arguments.
+
+ Returns
+ -------
+ r : ndarray
+ Output array
+
+ See Also
+ --------
+ numpy.outer : A less powerful version of ``np.multiply.outer``
+ that `ravel`\ s all inputs to 1D. This exists
+ primarily for compatibility with old code.
+
+ tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
+ ``np.multiply.outer(a, b)`` behave same for all
+ dimensions of a and b.
+
+ Examples
+ --------
+ >>> np.multiply.outer([1, 2, 3], [4, 5, 6])
+ array([[ 4, 5, 6],
+ [ 8, 10, 12],
+ [12, 15, 18]])
+
+ A multi-dimensional example:
+
+ >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> A.shape
+ (2, 3)
+ >>> B = np.array([[1, 2, 3, 4]])
+ >>> B.shape
+ (1, 4)
+ >>> C = np.multiply.outer(A, B)
+ >>> C.shape; C
+ (2, 3, 1, 4)
+ array([[[[ 1, 2, 3, 4]],
+ [[ 2, 4, 6, 8]],
+ [[ 3, 6, 9, 12]]],
+ [[[ 4, 8, 12, 16]],
+ [[ 5, 10, 15, 20]],
+ [[ 6, 12, 18, 24]]]])
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('at',
+ """
+ at(a, indices, b=None, /)
+
+ Performs an unbuffered in-place operation on operand 'a' for elements
+ specified by 'indices'. For addition ufunc, this method is equivalent to
+ ``a[indices] += b``, except that results are accumulated for elements that
+ are indexed more than once. For example, ``a[[0,0]] += 1`` will only
+ increment the first element once because of buffering, whereas
+ ``add.at(a, [0,0], 1)`` will increment the first element twice.
+
+ Parameters
+ ----------
+ a : array_like
+ The array to perform in place operation on.
+ indices : array_like or tuple
+ Array like index object or slice object for indexing into first
+ operand. If first operand has multiple dimensions, indices can be a
+ tuple of array like index objects or slice objects.
+ b : array_like
+ Second operand for ufuncs requiring two operands. Operand must be
+ broadcastable over first operand after indexing or slicing.
+
+ Examples
+ --------
+ Set items 0 and 1 to their negative values:
+
+ >>> import numpy as np
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.negative.at(a, [0, 1])
+ >>> a
+ array([-1, -2, 3, 4])
+
+ Increment items 0 and 1, and increment item 2 twice:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.add.at(a, [0, 1, 2, 2], 1)
+ >>> a
+ array([2, 3, 5, 4])
+
+ Add items 0 and 1 in first array to second array,
+ and store results in first array:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> b = np.array([1, 2])
+ >>> np.add.at(a, [0, 1], b)
+ >>> a
+ array([2, 4, 3, 4])
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes',
+ """
+ resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False)
+
+ Find the dtypes NumPy will use for the operation. Both input and
+ output dtypes are returned and may differ from those provided.
+
+ .. note::
+
+ This function always applies NEP 50 rules since it is not provided
+ any actual values. The Python types ``int``, ``float``, and
+ ``complex`` thus behave weak and should be passed for "untyped"
+ Python input.
+
+ Parameters
+ ----------
+ dtypes : tuple of dtypes, None, or literal int, float, complex
+ The input dtypes for each operand. Output operands can be
+ None, indicating that the dtype must be found.
+ signature : tuple of DTypes or None, optional
+ If given, enforces exact DType (classes) of the specific operand.
+ The ufunc ``dtype`` argument is equivalent to passing a tuple with
+ only output dtypes set.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ The casting mode when casting is necessary. This is identical to
+ the ufunc call casting modes.
+ reduction : boolean
+ If given, the resolution assumes a reduce operation is happening
+ which slightly changes the promotion and type resolution rules.
+ `dtypes` is usually something like ``(None, np.dtype("i2"), None)``
+ for reductions (first input is also the output).
+
+ .. note::
+
+ The default casting mode is "same_kind", however, as of
+ NumPy 1.24, NumPy uses "unsafe" for reductions.
+
+ Returns
+ -------
+ dtypes : tuple of dtypes
+ The dtypes which NumPy would use for the calculation. Note that
+ dtypes may not match the passed-in ones (casting may be necessary).
+
+
+ Examples
+ --------
+ This API requires passing dtypes, define them for convenience:
+
+ >>> import numpy as np
+ >>> int32 = np.dtype("int32")
+ >>> float32 = np.dtype("float32")
+
+ The typical ufunc call does not pass an output dtype. `numpy.add` has two
+ inputs and one output, so leave the output as ``None`` (not provided):
+
+ >>> np.add.resolve_dtypes((int32, float32, None))
+ (dtype('float64'), dtype('float64'), dtype('float64'))
+
+ The loop found uses "float64" for all operands (including the output), the
+ first input would be cast.
+
+ ``resolve_dtypes`` supports "weak" handling for Python scalars by passing
+ ``int``, ``float``, or ``complex``:
+
+ >>> np.add.resolve_dtypes((float32, float, None))
+ (dtype('float32'), dtype('float32'), dtype('float32'))
+
+ Where the Python ``float`` behaves similar to a Python value ``0.0``
+ in a ufunc call. (See :ref:`NEP 50 <NEP50>` for details.)
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context',
+ """
+ _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False)
+
+ See `numpy.ufunc.resolve_dtypes` for parameter information. This
+ function is considered *unstable*. You may use it, but the returned
+ information is NumPy version specific and expected to change.
+ Large API/ABI changes are not expected, but a new NumPy version is
+ expected to require updating code using this functionality.
+
+ This function is designed to be used in conjunction with
+ `numpy.ufunc._get_strided_loop`. The calls are split to mirror the C API
+ and allow future improvements.
+
+ Returns
+ -------
+ dtypes : tuple of dtypes
+ call_info :
+ PyCapsule with all necessary information to get access to low level
+ C calls. See `numpy.ufunc._get_strided_loop` for more information.
+
+ """))
+
+add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop',
+ """
+ _get_strided_loop(call_info, /, *, fixed_strides=None)
+
+ This function fills in the ``call_info`` capsule to include all
+ information necessary to call the low-level strided loop from NumPy.
+
+ See notes for more information.
+
+ Parameters
+ ----------
+ call_info : PyCapsule
+ The PyCapsule returned by `numpy.ufunc._resolve_dtypes_and_context`.
+ fixed_strides : tuple of int or None, optional
+ A tuple with fixed byte strides of all input arrays. NumPy may use
+ this information to find specialized loops, so any call must follow
+ the given stride. Use ``None`` to indicate that the stride is not
+ known (or not fixed) for all calls.
+
+ Notes
+ -----
+ Together with `numpy.ufunc._resolve_dtypes_and_context` this function
+ gives low-level access to the NumPy ufunc loops.
+ The first function does general preparation and returns the required
+ information. It returns this as a C capsule with the version specific
+ name ``numpy_1.24_ufunc_call_info``.
+ The NumPy 1.24 ufunc call info capsule has the following layout::
+
+ typedef struct {
+ PyArrayMethod_StridedLoop *strided_loop;
+ PyArrayMethod_Context *context;
+ NpyAuxData *auxdata;
+
+ /* Flag information (expected to change) */
+ npy_bool requires_pyapi; /* GIL is required by loop */
+
+ /* Loop doesn't set FPE flags; if not set check FPE flags */
+ npy_bool no_floatingpoint_errors;
+ } ufunc_call_info;
+
+ Note that the first call only fills in the ``context``. The call to
+ ``_get_strided_loop`` fills in all other data. The main thing to note is
+ that the new-style loops return 0 on success, -1 on failure. They are
+ passed context as new first input and ``auxdata`` as (replaced) last.
+
+ Only the ``strided_loop`` signature is considered guaranteed stable
+ for NumPy bug-fix releases. All other API is tied to the experimental
+ API versioning.
+
+ The reason for the split call is that cast information is required to
+ decide what the fixed-strides will be.
+
+ NumPy ties the lifetime of the ``auxdata`` information to the capsule.
+
+ """))
+
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'dtype',
+ """
+ dtype(dtype, align=False, copy=False, [metadata])
+
+ Create a data type object.
+
+ A numpy array is homogeneous, and contains elements described by a
+ dtype object. A dtype object can be constructed from different
+ combinations of fundamental numeric types.
+
+ Parameters
+ ----------
+ dtype
+ Object to be converted to a data type object.
+ align : bool, optional
+ Add padding to the fields to match what a C compiler would output
+ for a similar C-struct. Can be ``True`` only if `dtype` is a dictionary
+ or a comma-separated string. If a struct dtype is being created,
+ this also sets a sticky alignment flag ``isalignedstruct``.
+ copy : bool, optional
+ Make a new copy of the data-type object. If ``False``, the result
+ may just be a reference to a built-in data-type object.
+ metadata : dict, optional
+ An optional dictionary with dtype metadata.
+
+ See also
+ --------
+ result_type
+
+ Examples
+ --------
+ Using array-scalar type:
+
+ >>> import numpy as np
+ >>> np.dtype(np.int16)
+ dtype('int16')
+
+ Structured type, one field named 'f1', containing int16:
+
+ >>> np.dtype([('f1', np.int16)])
+ dtype([('f1', '<i2')])
+
+ Structured type, one field named 'f1', in itself containing a structured
+ type with one field:
+
+ >>> np.dtype([('f1', [('f1', np.int16)])])
+ dtype([('f1', [('f1', '<i2')])])
+
+ Structured type, two fields: the first field contains an unsigned int, the
+ second an int32:
+
+ >>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
+ dtype([('f1', '<u8'), ('f2', '<i4')])
+
+ Using array-protocol type strings:
+
+ >>> np.dtype([('a','f8'),('b','S10')])
+ dtype([('a', '<f8'), ('b', 'S10')])
+
+ Using comma-separated field formats. The shape is (2,3):
+
+ >>> np.dtype("i4, (2,3)f8")
+ dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+ Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
+ is a flexible type, here of size 10:
+
+ >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
+ dtype([('hello', '<i8', (3,)), ('world', 'V10')])
+
+ Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
+ the offsets in bytes:
+
+ >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+ dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
+
+ Using dictionaries. Two fields named 'gender' and 'age':
+
+ >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+ dtype([('gender', 'S1'), ('age', 'u1')])
+
+ Offsets in bytes, here 0 and 25:
+
+ >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+ dtype([('surname', 'S25'), ('age', 'u1')])
+
+ """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('alignment',
+ """
+ The required alignment (bytes) of this data-type according to the compiler.
+
+ More information is available in the C-API section of the manual.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> x = np.dtype('i4')
+ >>> x.alignment
+ 4
+
+ >>> x = np.dtype(float)
+ >>> x.alignment
+ 8
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('byteorder',
+ """
+ A character indicating the byte-order of this data-type object.
+
+ One of:
+
+ === ==============
+ '=' native
+ '<' little-endian
+ '>' big-endian
+ '|' not applicable
+ === ==============
+
+ All built-in data-type objects have byteorder either '=' or '|'.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> dt = np.dtype('i2')
+ >>> dt.byteorder
+ '='
+ >>> # endian is not relevant for 8 bit numbers
+ >>> np.dtype('i1').byteorder
+ '|'
+ >>> # or ASCII strings
+ >>> np.dtype('S2').byteorder
+ '|'
+ >>> # Even if specific code is given, and it is native
+ >>> # '=' is the byteorder
+ >>> import sys
+ >>> sys_is_le = sys.byteorder == 'little'
+ >>> native_code = '<' if sys_is_le else '>'
+ >>> swapped_code = '>' if sys_is_le else '<'
+ >>> dt = np.dtype(native_code + 'i2')
+ >>> dt.byteorder
+ '='
+ >>> # Swapped code shows up as itself
+ >>> dt = np.dtype(swapped_code + 'i2')
+ >>> dt.byteorder == swapped_code
+ True
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('char',
+ """A unique character code for each of the 21 different built-in types.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> x = np.dtype(float)
+ >>> x.char
+ 'd'
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('descr',
+ """
+ `__array_interface__` description of the data-type.
+
+ The format is that required by the 'descr' key in the
+ `__array_interface__` attribute.
+
+ Warning: This attribute exists specifically for `__array_interface__`,
+ and passing it directly to `numpy.dtype` will not accurately reconstruct
+ some dtypes (e.g., scalar and subarray dtypes).
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> x = np.dtype(float)
+ >>> x.descr
+ [('', '<f8')]
+
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.descr
+ [('name', '<U16'), ('grades', '<f8', (2,))]
+
+ """))
+
+ add_newdoc('numpy._core.multiarray', 'dtype', ('fields',
+ """
+ Dictionary of named fields defined for this data type, or ``None``.
+
+ The dictionary is indexed by keys that are the names of the fields.
+ Each entry in the dictionary has a tuple entry of the form
+ ``(dtype, offset)`` or ``(dtype, offset, title)``. If present, the
+ optional title can be any object (if it is a string or unicode then it
+ will also be a key in the fields dictionary, otherwise it's meta-data).
+ Notice also that the first two elements of the tuple can be passed
+ directly as arguments to the ``ndarray.getfield`` and
+ ``ndarray.setfield`` methods.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> print(dt.fields)
+ {'name': (dtype('|S16'), 0), 'grades': (dtype(('float64',(2,))), 16)}
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('flags',
+ """
+ Bit-flags describing how this data type is to be interpreted.
+
+ Bit-masks are in ``numpy._core.multiarray`` as the constants
+ `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
+ `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
+ of these flags is in C-API documentation; they are largely useful
+ for user-defined data-types.
+
+ The following example demonstrates that operations on this particular
+ dtype require the Python C-API.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+ >>> x.flags
+ 16
+ >>> np._core.multiarray.NEEDS_PYAPI
+ 16
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('hasobject',
+ """
+ Boolean indicating whether this dtype contains any reference-counted
+ objects in any fields or sub-dtypes.
+
+ Recall that what is actually in the ndarray memory representing
+ the Python object is the memory address of that object (a pointer).
+ Special handling may be required, and this attribute is useful for
+ distinguishing data types that may contain arbitrary Python objects
+ and data-types that won't.
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('isbuiltin',
+ """
+ Integer indicating how this dtype relates to the built-in dtypes.
+
+ Read-only.
+
+ = ========================================================================
+ 0 if this is a structured array type, with fields
+ 1 if this is a dtype compiled into numpy (such as ints, floats etc)
+ 2 if the dtype is for a user-defined numpy type
+ A user-defined type uses the numpy C-API machinery to extend
+ numpy to handle a new array type. See
+ :ref:`user.user-defined-data-types` in the NumPy manual.
+ = ========================================================================
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> dt = np.dtype('i2')
+ >>> dt.isbuiltin
+ 1
+ >>> dt = np.dtype('f8')
+ >>> dt.isbuiltin
+ 1
+ >>> dt = np.dtype([('field1', 'f8')])
+ >>> dt.isbuiltin
+ 0
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('isnative',
+ """
+ Boolean indicating whether the byte order of this dtype is native
+ to the platform.
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('isalignedstruct',
+ """
+ Boolean indicating whether the dtype is a struct which maintains
+ field alignment. This flag is sticky, so when combining multiple
+ structs together, it is preserved and produces new dtypes which
+ are also aligned.
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('itemsize',
+ """
+ The element size of this data-type object.
+
+ For 18 of the 21 types this number is fixed by the data-type.
+ For the flexible data-types, this number can be anything.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> arr = np.array([[1, 2], [3, 4]])
+ >>> arr.dtype
+ dtype('int64')
+ >>> arr.itemsize
+ 8
+
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.itemsize
+ 80
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('kind',
+ """
+ A character code (one of 'biufcmMOSTUV') identifying the general kind of data.
+
+ = ======================
+ b boolean
+ i signed integer
+ u unsigned integer
+ f floating-point
+ c complex floating-point
+ m timedelta
+ M datetime
+ O object
+ S (byte-)string
+ T string (StringDType)
+ U Unicode
+ V void
+ = ======================
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> dt = np.dtype('i4')
+ >>> dt.kind
+ 'i'
+ >>> dt = np.dtype('f8')
+ >>> dt.kind
+ 'f'
+ >>> dt = np.dtype([('field1', 'f8')])
+ >>> dt.kind
+ 'V'
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('metadata',
+ """
+ Either ``None`` or a readonly dictionary of metadata (mappingproxy).
+
+ The metadata field can be set using any dictionary at data-type
+ creation. NumPy currently has no uniform approach to propagating
+ metadata; although some array operations preserve it, there is no
+ guarantee that others will.
+
+ .. warning::
+
+ Although used in certain projects, this feature was long undocumented
+ and is not well supported. Some aspects of metadata propagation
+ are expected to change in the future.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> dt = np.dtype(float, metadata={"key": "value"})
+ >>> dt.metadata["key"]
+ 'value'
+ >>> arr = np.array([1, 2, 3], dtype=dt)
+ >>> arr.dtype.metadata
+ mappingproxy({'key': 'value'})
+
+ Adding arrays with identical datatypes currently preserves the metadata:
+
+ >>> (arr + arr).dtype.metadata
+ mappingproxy({'key': 'value'})
+
+ If the arrays have different dtype metadata, the first one wins:
+
+ >>> dt2 = np.dtype(float, metadata={"key2": "value2"})
+ >>> arr2 = np.array([3, 2, 1], dtype=dt2)
+ >>> print((arr + arr2).dtype.metadata)
+ {'key': 'value'}
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('name',
+ """
+ A bit-width name for this data-type.
+
+ Un-sized flexible data-type objects do not have this attribute.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> x = np.dtype(float)
+ >>> x.name
+ 'float64'
+ >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+ >>> x.name
+ 'void640'
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('names',
+ """
+ Ordered list of field names, or ``None`` if there are no fields.
+
+ The names are ordered according to increasing byte offset. This can be
+ used, for example, to walk through all of the named fields in offset order.
+
+ Examples
+ --------
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.names
+ ('name', 'grades')
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('num',
+ """
+ A unique number for each of the 21 different built-in types.
+
+ These are roughly ordered from least-to-most precision.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> dt = np.dtype(str)
+ >>> dt.num
+ 19
+
+ >>> dt = np.dtype(float)
+ >>> dt.num
+ 12
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('shape',
+ """
+ Shape tuple of the sub-array if this data type describes a sub-array,
+ and ``()`` otherwise.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> dt = np.dtype(('i4', 4))
+ >>> dt.shape
+ (4,)
+
+ >>> dt = np.dtype(('i4', (2, 3)))
+ >>> dt.shape
+ (2, 3)
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('ndim',
+ """
+ Number of dimensions of the sub-array if this data type describes a
+ sub-array, and ``0`` otherwise.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.dtype(float)
+ >>> x.ndim
+ 0
+
+ >>> x = np.dtype((float, 8))
+ >>> x.ndim
+ 1
+
+ >>> x = np.dtype(('i4', (3, 4)))
+ >>> x.ndim
+ 2
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('str',
+ """The array-protocol typestring of this data-type object."""))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('subdtype',
+ """
+ Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
+ None otherwise.
+
+ The *shape* is the fixed shape of the sub-array described by this
+ data type, and *item_dtype* the data type of the array.
+
+ If a field whose dtype object has this attribute is retrieved,
+ then the extra dimensions implied by *shape* are tacked on to
+ the end of the retrieved array.
+
+ See Also
+ --------
+ dtype.base
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.dtype('8f')
+ >>> x.subdtype
+ (dtype('float32'), (8,))
+
+ >>> x = np.dtype('i2')
+ >>> x.subdtype
+ >>>
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('base',
+ """
+ Returns dtype for the base element of the subarrays,
+ regardless of their dimension or shape.
+
+ See Also
+ --------
+ dtype.subdtype
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.dtype('8f')
+ >>> x.base
+ dtype('float32')
+
+ >>> x = np.dtype('i2')
+ >>> x.base
+ dtype('int16')
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('type',
+ """The type object used to instantiate a scalar of this data-type."""))
+
+##############################################################################
+#
+# dtype methods
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder',
+ """
+ newbyteorder(new_order='S', /)
+
+ Return a new dtype with a different byte order.
+
+ Changes are also made in all fields and sub-arrays of the data type.
+
+ Parameters
+ ----------
+ new_order : string, optional
+ Byte order to force; a value from the byte order specifications
+ below. The default value ('S') results in swapping the current
+ byte order. `new_order` codes can be any of:
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'little'} - little endian
+ * {'>', 'big'} - big endian
+ * {'=', 'native'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ Returns
+ -------
+ new_dtype : dtype
+ New dtype object with the given change to the byte order.
+
+ Notes
+ -----
+ Changes are also made in all fields and sub-arrays of the data type.
+
+ Examples
+ --------
+ >>> import sys
+ >>> sys_is_le = sys.byteorder == 'little'
+ >>> native_code = '<' if sys_is_le else '>'
+ >>> swapped_code = '>' if sys_is_le else '<'
+ >>> import numpy as np
+ >>> native_dt = np.dtype(native_code+'i2')
+ >>> swapped_dt = np.dtype(swapped_code+'i2')
+ >>> native_dt.newbyteorder('S') == swapped_dt
+ True
+ >>> native_dt.newbyteorder() == swapped_dt
+ True
+ >>> native_dt == swapped_dt.newbyteorder('S')
+ True
+ >>> native_dt == swapped_dt.newbyteorder('=')
+ True
+ >>> native_dt == swapped_dt.newbyteorder('N')
+ True
+ >>> native_dt == native_dt.newbyteorder('|')
+ True
+ >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+ True
+ >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+ True
+ >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+ True
+ >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+ True
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__class_getitem__',
+ """
+ __class_getitem__(item, /)
+
+ Return a parametrized wrapper around the `~numpy.dtype` type.
+
+ .. versionadded:: 1.22
+
+ Returns
+ -------
+ alias : types.GenericAlias
+ A parametrized `~numpy.dtype` type.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> np.dtype[np.int64]
+ numpy.dtype[numpy.int64]
+
+ See Also
+ --------
+ :pep:`585` : Type hinting generics in standard collections.
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__ge__',
+ """
+ __ge__(value, /)
+
+ Return ``self >= value``.
+
+ Equivalent to ``np.can_cast(value, self, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__le__',
+ """
+ __le__(value, /)
+
+ Return ``self <= value``.
+
+ Equivalent to ``np.can_cast(self, value, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__',
+ """
+ __ge__(value, /)
+
+ Return ``self > value``.
+
+ Equivalent to
+ ``self != value and np.can_cast(value, self, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__lt__',
+ """
+ __lt__(value, /)
+
+ Return ``self < value``.
+
+ Equivalent to
+ ``self != value and np.can_cast(self, value, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar',
+ """
+ busdaycalendar(weekmask='1111100', holidays=None)
+
+ A business day calendar object that efficiently stores information
+ defining valid days for the busday family of functions.
+
+ The default valid days are Monday through Friday ("business days").
+ A busdaycalendar object can be specified with any set of weekly
+ valid days, plus an optional set of "holiday" dates that will always
+ be invalid.
+
+ Once a busdaycalendar object is created, the weekmask and holidays
+ cannot be modified.
+
+ Parameters
+ ----------
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates, no matter which
+ weekday they fall upon. Holiday dates may be specified in any
+ order, and NaT (not-a-time) dates are ignored. This list is
+ saved in a normalized form that is suited for fast calculations
+ of valid days.
+
+ Returns
+ -------
+ out : busdaycalendar
+ A business day calendar object containing the specified
+ weekmask and holidays values.
+
+ See Also
+ --------
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Attributes
+ ----------
+ weekmask : (copy) seven-element array of bool
+ holidays : (copy) sorted array of datetime64[D]
+
+ Notes
+ -----
+ Once a busdaycalendar object is created, you cannot modify the
+ weekmask or holidays. The attributes return copies of internal data.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> # Some important days in July
+ ... bdd = np.busdaycalendar(
+ ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+ >>> # Default is Monday to Friday weekdays
+ ... bdd.weekmask
+ array([ True, True, True, True, True, False, False])
+ >>> # Any holidays already on the weekend are removed
+ ... bdd.holidays
+ array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
+ """)
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar', ('weekmask',
+ """A copy of the seven-element boolean mask indicating valid days."""))
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar', ('holidays',
+ """A copy of the holiday array indicating additional invalid days."""))
+
+add_newdoc('numpy._core.multiarray', 'normalize_axis_index',
+ """
+ normalize_axis_index(axis, ndim, msg_prefix=None)
+
+    Normalizes an axis index, `axis`, such that it is a valid positive index
+    into the shape of an array with `ndim` dimensions. Raises an AxisError
+    with an appropriate message if this is not possible.
+
+ Used internally by all axis-checking logic.
+
+ Parameters
+ ----------
+    axis : int
+        The un-normalized index of the axis. Can be negative.
+    ndim : int
+        The number of dimensions of the array that `axis` should be normalized
+        against.
+    msg_prefix : str
+        A prefix to put before the message, typically the name of the argument.
+
+ Returns
+ -------
+ normalized_axis : int
+ The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+ Raises
+ ------
+ AxisError
+        If the axis index is invalid, i.e. when `-ndim <= axis < ndim` is false.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib.array_utils import normalize_axis_index
+ >>> normalize_axis_index(0, ndim=3)
+ 0
+ >>> normalize_axis_index(1, ndim=3)
+ 1
+ >>> normalize_axis_index(-1, ndim=3)
+ 2
+
+ >>> normalize_axis_index(3, ndim=3)
+ Traceback (most recent call last):
+ ...
+ numpy.exceptions.AxisError: axis 3 is out of bounds for array ...
+ >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
+ Traceback (most recent call last):
+ ...
+ numpy.exceptions.AxisError: axes_arg: axis -4 is out of bounds ...
+ """)
+
+add_newdoc('numpy._core.multiarray', 'datetime_data',
+ """
+ datetime_data(dtype, /)
+
+ Get information about the step size of a date or time type.
+
+ The returned tuple can be passed as the second argument of `numpy.datetime64` and
+ `numpy.timedelta64`.
+
+ Parameters
+ ----------
+ dtype : dtype
+ The dtype object, which must be a `datetime64` or `timedelta64` type.
+
+ Returns
+ -------
+ unit : str
+        The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
+ is based.
+ count : int
+ The number of base units in a step.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> dt_25s = np.dtype('timedelta64[25s]')
+ >>> np.datetime_data(dt_25s)
+ ('s', 25)
+ >>> np.array(10, dt_25s).astype('timedelta64[s]')
+ array(250, dtype='timedelta64[s]')
+
+ The result can be used to construct a datetime that uses the same units
+ as a timedelta
+
+ >>> np.datetime64('2010', np.datetime_data(dt_25s))
+ np.datetime64('2010-01-01T00:00:00','25s')
+ """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ """
+ Base class for numpy scalar types.
+
+    Class from which most (if not all) numpy scalar types are derived. For
+ consistency, exposes the same API as `ndarray`, despite many
+ consequent attributes being either "get-only," or completely irrelevant.
+ This is the class from which it is strongly suggested users should derive
+ custom scalar types.
+
+ """)
+
+# Attributes
+
+def refer_to_array_attribute(attr, method=True):
+ docstring = """
+ Scalar {} identical to the corresponding array attribute.
+
+ Please see `ndarray.{}`.
+ """
+
+ return attr, docstring.format("method" if method else "attribute", attr)
+
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('T', method=False))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('base', method=False))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('data',
+ """Pointer to start of data."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('dtype',
+ """Get array data-descriptor."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('flags',
+ """The integer value of flags."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('flat',
+ """A 1-D view of the scalar."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('imag',
+ """The imaginary part of the scalar."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('itemsize',
+ """The length of one element in bytes."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('ndim',
+ """The number of array dimensions."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('real',
+ """The real part of the scalar."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('shape',
+ """Tuple of array dimensions."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('size',
+ """The number of elements in the gentype."""))
+
+add_newdoc('numpy._core.numerictypes', 'generic', ('strides',
+ """Tuple of bytes steps in each dimension."""))
+
+# Methods
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('all'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('any'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('argmax'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('argmin'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('argsort'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('astype'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('byteswap'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('choose'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('clip'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('compress'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('conjugate'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('copy'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('cumprod'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('cumsum'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('diagonal'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('dump'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('dumps'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('fill'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('flatten'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('getfield'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('item'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('max'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('mean'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('min'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('nonzero'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('prod'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('put'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('ravel'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('repeat'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('reshape'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('resize'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('round'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('searchsorted'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('setfield'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('setflags'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('sort'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('squeeze'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('std'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('sum'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('swapaxes'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('take'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('tofile'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('tolist'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('tostring'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('trace'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('transpose'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('var'))
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+ refer_to_array_attribute('view'))
+
+add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__',
+ """
+ __class_getitem__(item, /)
+
+ Return a parametrized wrapper around the `~numpy.number` type.
+
+ .. versionadded:: 1.22
+
+ Returns
+ -------
+ alias : types.GenericAlias
+ A parametrized `~numpy.number` type.
+
+ Examples
+ --------
+ >>> from typing import Any
+ >>> import numpy as np
+
+ >>> np.signedinteger[Any]
+ numpy.signedinteger[typing.Any]
+
+ See Also
+ --------
+ :pep:`585` : Type hinting generics in standard collections.
+
+ """))
+
+##############################################################################
+#
+# Documentation for scalar type abstract base classes in type hierarchy
+#
+##############################################################################
+
+
+add_newdoc('numpy._core.numerictypes', 'number',
+ """
+ Abstract base class of all numeric scalar types.
+
+ """)
+
+add_newdoc('numpy._core.numerictypes', 'integer',
+ """
+ Abstract base class of all integer scalar types.
+
+ """)
+
+add_newdoc('numpy._core.numerictypes', 'signedinteger',
+ """
+ Abstract base class of all signed integer scalar types.
+
+ """)
+
+add_newdoc('numpy._core.numerictypes', 'unsignedinteger',
+ """
+ Abstract base class of all unsigned integer scalar types.
+
+ """)
+
+add_newdoc('numpy._core.numerictypes', 'inexact',
+ """
+ Abstract base class of all numeric scalar types with a (potentially)
+ inexact representation of the values in its range, such as
+ floating-point numbers.
+
+ """)
+
+add_newdoc('numpy._core.numerictypes', 'floating',
+ """
+ Abstract base class of all floating-point scalar types.
+
+ """)
+
+add_newdoc('numpy._core.numerictypes', 'complexfloating',
+ """
+ Abstract base class of all complex number scalar types that are made up of
+ floating-point numbers.
+
+ """)
+
+add_newdoc('numpy._core.numerictypes', 'flexible',
+ """
+ Abstract base class of all scalar types without predefined length.
+ The actual size of these types depends on the specific `numpy.dtype`
+ instantiation.
+
+ """)
+
+add_newdoc('numpy._core.numerictypes', 'character',
+ """
+ Abstract base class of all character string scalar types.
+
+ """)
+
+add_newdoc('numpy._core.multiarray', 'StringDType',
+ """
+ StringDType(*, na_object=np._NoValue, coerce=True)
+
+ Create a StringDType instance.
+
+ StringDType can be used to store UTF-8 encoded variable-width strings in
+ a NumPy array.
+
+ Parameters
+ ----------
+ na_object : object, optional
+ Object used to represent missing data. If unset, the array will not
+ use a missing data sentinel.
+    coerce : bool, optional
+        Whether items in an array-like passed to an array creation function
+        that are neither str nor a str subtype should be coerced to str.
+        Defaults to True. If set to False, creating a StringDType array
+        from an array-like containing entries that are not already strings
+        raises an error.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+
+ >>> from numpy.dtypes import StringDType
+ >>> np.array(["hello", "world"], dtype=StringDType())
+ array(["hello", "world"], dtype=StringDType())
+
+ >>> arr = np.array(["hello", None, "world"],
+ ... dtype=StringDType(na_object=None))
+ >>> arr
+ array(["hello", None, "world"], dtype=StringDType(na_object=None))
+ >>> arr[1] is None
+ True
+
+ >>> arr = np.array(["hello", np.nan, "world"],
+ ... dtype=StringDType(na_object=np.nan))
+ >>> np.isnan(arr)
+ array([False, True, False])
+
+ >>> np.array([1.2, object(), "hello world"],
+ ... dtype=StringDType(coerce=False))
+ Traceback (most recent call last):
+ ...
+ ValueError: StringDType only allows string data when string coercion is disabled.
+
+ >>> np.array(["hello", "world"], dtype=StringDType(coerce=True))
+ array(["hello", "world"], dtype=StringDType(coerce=True))
+ """)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.pyi
new file mode 100644
index 00000000..b23c3b1a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs.pyi
@@ -0,0 +1,3 @@
+from .overrides import get_array_function_like_doc as get_array_function_like_doc
+
+def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.py b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.py
new file mode 100644
index 00000000..96170d80
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.py
@@ -0,0 +1,390 @@
+"""
+This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
+our sphinx ``conf.py`` during doc builds, where we want to avoid showing
+platform-dependent information.
+"""
+import os
+import sys
+
+from numpy._core import dtype
+from numpy._core import numerictypes as _numerictypes
+from numpy._core.function_base import add_newdoc
+
+##############################################################################
+#
+# Documentation for concrete scalar classes
+#
+##############################################################################
+
+def numeric_type_aliases(aliases):
+ def type_aliases_gen():
+ for alias, doc in aliases:
+ try:
+ alias_type = getattr(_numerictypes, alias)
+ except AttributeError:
+ # The set of aliases that actually exist varies between platforms
+ pass
+ else:
+ yield (alias_type, alias, doc)
+ return list(type_aliases_gen())
+
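+# For example (illustrative): numeric_type_aliases([('int64', 'doc')])
+# returns [(numpy.int64, 'int64', 'doc')] on platforms where the alias
+# exists, and [] where it does not.
+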
+
+possible_aliases = numeric_type_aliases([
+ ('int8', '8-bit signed integer (``-128`` to ``127``)'),
+ ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
+ ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
+ ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
+ ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
+ ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
+ ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
+ ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
+ ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
+ ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
+ ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
+ ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
+    ('float64', '64-bit-precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
+ ('float96', '96-bit extended-precision floating-point number type'),
+ ('float128', '128-bit extended-precision floating-point number type'),
+ ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
+ ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
+ ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
+ ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
+ ])
+
+
+def _get_platform_and_machine():
+ try:
+ system, _, _, _, machine = os.uname()
+ except AttributeError:
+ system = sys.platform
+ if system == 'win32':
+ machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
+ or os.environ.get('PROCESSOR_ARCHITECTURE', '')
+ else:
+ machine = 'unknown'
+ return system, machine
+
+
+_system, _machine = _get_platform_and_machine()
+_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
+
+
+def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
+ # note: `:field: value` is rST syntax which renders as field lists.
+ o = getattr(_numerictypes, obj)
+
+ character_code = dtype(o).char
+ canonical_name_doc = "" if obj == o.__name__ else \
+ f":Canonical name: `numpy.{obj}`\n "
+ if fixed_aliases:
+ alias_doc = ''.join(f":Alias: `numpy.{alias}`\n "
+ for alias in fixed_aliases)
+ else:
+ alias_doc = ''
+ alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n "
+ for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+
+ docstring = f"""
+ {doc.strip()}
+
+ :Character code: ``'{character_code}'``
+ {canonical_name_doc}{alias_doc}
+ """
+
+ add_newdoc('numpy._core.numerictypes', obj, docstring)
+
+
+_bool_docstring = (
+ """
+ Boolean type (True or False), stored as a byte.
+
+ .. warning::
+
+ The :class:`bool` type is not a subclass of the :class:`int_` type
+        (the :class:`bool` is not even a number type). This differs from
+        Python's default implementation of :class:`bool` as a
+        subclass of :class:`int`.
+ """
+)
+
+add_newdoc_for_scalar_type('bool', [], _bool_docstring)
+
+add_newdoc_for_scalar_type('bool_', [], _bool_docstring)
+
+add_newdoc_for_scalar_type('byte', [],
+ """
+ Signed integer type, compatible with C ``char``.
+ """)
+
+add_newdoc_for_scalar_type('short', [],
+ """
+ Signed integer type, compatible with C ``short``.
+ """)
+
+add_newdoc_for_scalar_type('intc', [],
+ """
+ Signed integer type, compatible with C ``int``.
+ """)
+
+# TODO: These docs probably need an if to highlight the default rather than
+# the C-types (and be correct).
+add_newdoc_for_scalar_type('int_', [],
+ """
+    Default signed integer type, 64-bit on 64-bit systems and 32-bit on
+    32-bit systems.
+ """)
+
+add_newdoc_for_scalar_type('longlong', [],
+ """
+ Signed integer type, compatible with C ``long long``.
+ """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned char``.
+ """)
+
+add_newdoc_for_scalar_type('ushort', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned short``.
+ """)
+
+add_newdoc_for_scalar_type('uintc', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned int``.
+ """)
+
+add_newdoc_for_scalar_type('uint', [],
+ """
+    Unsigned integer type, 64-bit on 64-bit systems and 32-bit on 32-bit
+    systems.
+ """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+ """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+ """)
+
+add_newdoc_for_scalar_type('half', [],
+ """
+ Half-precision floating-point number type.
+ """)
+
+add_newdoc_for_scalar_type('single', [],
+ """
+ Single-precision floating-point number type, compatible with C ``float``.
+ """)
+
+add_newdoc_for_scalar_type('double', [],
+ """
+ Double-precision floating-point number type, compatible with Python
+ :class:`float` and C ``double``.
+ """)
+
+add_newdoc_for_scalar_type('longdouble', [],
+ """
+ Extended-precision floating-point number type, compatible with C
+ ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+ """)
+
+add_newdoc_for_scalar_type('csingle', [],
+ """
+ Complex number type composed of two single-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('cdouble', [],
+ """
+ Complex number type composed of two double-precision floating-point
+ numbers, compatible with Python :class:`complex`.
+ """)
+
+add_newdoc_for_scalar_type('clongdouble', [],
+ """
+ Complex number type composed of two extended-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('object_', [],
+ """
+ Any Python object.
+ """)
+
+add_newdoc_for_scalar_type('str_', [],
+ r"""
+ A unicode string.
+
+ This type strips trailing null codepoints.
+
+ >>> s = np.str_("abc\x00")
+ >>> s
+ 'abc'
+
+ Unlike the builtin :class:`str`, this supports the
+ :ref:`python:bufferobjects`, exposing its contents as UCS4:
+
+ >>> m = memoryview(np.str_("abc"))
+ >>> m.format
+ '3w'
+ >>> m.tobytes()
+ b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
+ """)
+
+add_newdoc_for_scalar_type('bytes_', [],
+ r"""
+ A byte string.
+
+ When used in arrays, this type strips trailing null bytes.
+ """)
+
+add_newdoc_for_scalar_type('void', [],
+ r"""
+ np.void(length_or_data, /, dtype=None)
+
+ Create a new structured or unstructured void scalar.
+
+ Parameters
+ ----------
+    length_or_data : int, array-like, bytes-like, object
+        This argument has multiple meanings (see Notes): the length or
+        the bytes data of an unstructured void, or, when `dtype` is
+        provided, the data to be stored in the new scalar. It can be
+        an array-like, in which case an array may be returned.
+ dtype : dtype, optional
+ If provided the dtype of the new scalar. This dtype must
+ be "void" dtype (i.e. a structured or unstructured void,
+ see also :ref:`defining-structured-types`).
+
+ .. versionadded:: 1.24
+
+ Notes
+ -----
+ For historical reasons and because void scalars can represent both
+ arbitrary byte data and structured dtypes, the void constructor
+ has three calling conventions:
+
+ 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
+ ``\0`` bytes. The 5 can be a Python or NumPy integer.
+ 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
+ The dtype itemsize will match the byte string length, here ``"V10"``.
+ 3. When a ``dtype=`` is passed the call is roughly the same as an
+ array creation. However, a void scalar rather than array is returned.
+
+ Please see the examples which show all three different conventions.
+
+ Examples
+ --------
+ >>> np.void(5)
+ np.void(b'\x00\x00\x00\x00\x00')
+ >>> np.void(b'abcd')
+ np.void(b'\x61\x62\x63\x64')
+ >>> np.void((3.2, b'eggs'), dtype="d,S5")
+    np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
+    >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
+ np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
+
+ """)
+
+add_newdoc_for_scalar_type('datetime64', [],
+ """
+ If created from a 64-bit integer, it represents an offset from
+ ``1970-01-01T00:00:00``.
+    If created from a string, the string can be in ISO 8601 date
+    or datetime format.
+
+    When parsing a string to create a datetime object, if the string contains
+    a trailing timezone (a 'Z' or a timezone offset), the timezone is
+    dropped and a ``UserWarning`` is raised.
+
+ Datetime64 objects should be considered to be UTC and therefore have an
+ offset of +0000.
+
+ >>> np.datetime64(10, 'Y')
+ np.datetime64('1980')
+ >>> np.datetime64('1980', 'Y')
+ np.datetime64('1980')
+ >>> np.datetime64(10, 'D')
+ np.datetime64('1970-01-11')
+
+ See :ref:`arrays.datetime` for more information.
+ """)
+
+add_newdoc_for_scalar_type('timedelta64', [],
+ """
+ A timedelta stored as a 64-bit integer.
+
+ See :ref:`arrays.datetime` for more information.
+ """)
+
+add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
+ """
+ integer.is_integer() -> bool
+
+ Return ``True`` if the number is finite with integral value.
+
+ .. versionadded:: 1.22
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.int64(-2).is_integer()
+ True
+ >>> np.uint32(5).is_integer()
+ True
+ """))
+
+# TODO: work out how to put this on the base class, np.floating
+for float_name in ('half', 'single', 'double', 'longdouble'):
+ add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
+ f"""
+ {float_name}.as_integer_ratio() -> (int, int)
+
+ Return a pair of integers, whose ratio is exactly equal to the original
+ floating point number, and with a positive denominator.
+ Raise `OverflowError` on infinities and a `ValueError` on NaNs.
+
+ >>> np.{float_name}(10.0).as_integer_ratio()
+ (10, 1)
+ >>> np.{float_name}(0.0).as_integer_ratio()
+ (0, 1)
+ >>> np.{float_name}(-.25).as_integer_ratio()
+ (-1, 4)
+ """))
+
+ add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
+ f"""
+ {float_name}.is_integer() -> bool
+
+ Return ``True`` if the floating point number is finite with integral
+ value, and ``False`` otherwise.
+
+ .. versionadded:: 1.22
+
+ Examples
+ --------
+ >>> np.{float_name}(-2.0).is_integer()
+ True
+ >>> np.{float_name}(3.2).is_integer()
+ False
+ """))
+
+for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
+                 'int64', 'uint64'):
+ # Add negative examples for signed cases by checking typecode
+ add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
+ f"""
+ {int_name}.bit_count() -> int
+
+ Computes the number of 1-bits in the absolute value of the input.
+ Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
+
+ Examples
+ --------
+ >>> np.{int_name}(127).bit_count()
+ 7""" +
+ (f"""
+ >>> np.{int_name}(-127).bit_count()
+ 7
+ """ if dtype(int_name).char.islower() else "")))
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.pyi
new file mode 100644
index 00000000..4a06c9b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_add_newdocs_scalars.pyi
@@ -0,0 +1,16 @@
+from collections.abc import Iterable
+from typing import Final
+
+import numpy as np
+
+possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ...
+_system: Final[str] = ...
+_machine: Final[str] = ...
+_doc_alias_string: Final[str] = ...
+_bool_docstring: Final[str] = ...
+int_name: str = ...
+float_name: str = ...
+
+def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ...
+def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ...
+def _get_platform_and_machine() -> tuple[str, str]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.py b/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.py
new file mode 100644
index 00000000..613c5cf5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.py
@@ -0,0 +1,134 @@
+"""
+Functions in the ``as*array`` family that promote array-likes into arrays.
+
+`require` fits this category despite its name not matching this pattern.
+"""
+from .multiarray import array, asanyarray
+from .overrides import (
+ array_function_dispatch,
+ finalize_array_function_like,
+ set_module,
+)
+
+__all__ = ["require"]
+
+
+POSSIBLE_FLAGS = {
+ 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
+ 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
+ 'A': 'A', 'ALIGNED': 'A',
+ 'W': 'W', 'WRITEABLE': 'W',
+ 'O': 'O', 'OWNDATA': 'O',
+ 'E': 'E', 'ENSUREARRAY': 'E'
+}
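+# For example (illustrative), long and short flag names normalize identically:
+# {POSSIBLE_FLAGS[x.upper()] for x in ('fortran', 'aligned')} == {'F', 'A'}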
+
+
+@finalize_array_function_like
+@set_module('numpy')
+def require(a, dtype=None, requirements=None, *, like=None):
+ """
+ Return an ndarray of the provided type that satisfies requirements.
+
+ This function is useful to be sure that an array with the correct flags
+ is returned for passing to compiled code (perhaps through ctypes).
+
+ Parameters
+ ----------
+ a : array_like
+ The object to be converted to a type-and-requirement-satisfying array.
+ dtype : data-type
+        The required data-type. If None, preserve the current dtype. If your
+ application requires the data to be in native byteorder, include
+ a byteorder specification as a part of the dtype specification.
+ requirements : str or sequence of str
+ The requirements list can be any of the following
+
+ * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
+ * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
+ * 'ALIGNED' ('A') - ensure a data-type aligned array
+ * 'WRITEABLE' ('W') - ensure a writable array
+ * 'OWNDATA' ('O') - ensure an array that owns its own data
+ * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array with specified requirements and type if given.
+
+ See Also
+ --------
+ asarray : Convert input to an ndarray.
+ asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Notes
+ -----
+ The returned array will be guaranteed to have the listed requirements
+ by making a copy if needed.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(6).reshape(2,3)
+ >>> x.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : False
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+
+ >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
+ >>> y.flags
+ C_CONTIGUOUS : False
+ F_CONTIGUOUS : True
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+
+ """
+ if like is not None:
+ return _require_with_like(
+ like,
+ a,
+ dtype=dtype,
+ requirements=requirements,
+ )
+
+ if not requirements:
+ return asanyarray(a, dtype=dtype)
+
+ requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
+
+ if 'E' in requirements:
+ requirements.remove('E')
+ subok = False
+ else:
+ subok = True
+
+ order = 'A'
+ if requirements >= {'C', 'F'}:
+ raise ValueError('Cannot specify both "C" and "F" order')
+ elif 'F' in requirements:
+ order = 'F'
+ requirements.remove('F')
+ elif 'C' in requirements:
+ order = 'C'
+ requirements.remove('C')
+
+ arr = array(a, dtype=dtype, order=order, copy=None, subok=subok)
+
+ for prop in requirements:
+ if not arr.flags[prop]:
+ return arr.copy(order)
+ return arr
+
+
+_require_with_like = array_function_dispatch()(require)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.pyi
new file mode 100644
index 00000000..a4bee004
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_asarray.pyi
@@ -0,0 +1,41 @@
+from collections.abc import Iterable
+from typing import Any, Literal, TypeAlias, TypeVar, overload
+
+from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc
+
+_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
+
+_Requirements: TypeAlias = Literal[
+ "C", "C_CONTIGUOUS", "CONTIGUOUS",
+ "F", "F_CONTIGUOUS", "FORTRAN",
+ "A", "ALIGNED",
+ "W", "WRITEABLE",
+ "O", "OWNDATA"
+]
+_E: TypeAlias = Literal["E", "ENSUREARRAY"]
+_RequirementsWithE: TypeAlias = _Requirements | _E
+
+@overload
+def require(
+ a: _ArrayT,
+ dtype: None = ...,
+ requirements: _Requirements | Iterable[_Requirements] | None = ...,
+ *,
+ like: _SupportsArrayFunc = ...
+) -> _ArrayT: ...
+@overload
+def require(
+ a: object,
+ dtype: DTypeLike = ...,
+ requirements: _E | Iterable[_RequirementsWithE] = ...,
+ *,
+ like: _SupportsArrayFunc = ...
+) -> NDArray[Any]: ...
+@overload
+def require(
+ a: object,
+ dtype: DTypeLike = ...,
+ requirements: _Requirements | Iterable[_Requirements] | None = ...,
+ *,
+ like: _SupportsArrayFunc = ...
+) -> NDArray[Any]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.py b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.py
new file mode 100644
index 00000000..6a8a091b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.py
@@ -0,0 +1,366 @@
+"""
+A place for code to be called from the implementation of np.dtype
+
+String handling is much easier to do correctly in python.
+"""
+import numpy as np
+
+_kind_to_stem = {
+ 'u': 'uint',
+ 'i': 'int',
+ 'c': 'complex',
+ 'f': 'float',
+ 'b': 'bool',
+ 'V': 'void',
+ 'O': 'object',
+ 'M': 'datetime',
+ 'm': 'timedelta',
+ 'S': 'bytes',
+ 'U': 'str',
+}
+
+
+def _kind_name(dtype):
+ try:
+ return _kind_to_stem[dtype.kind]
+    except KeyError:
+ raise RuntimeError(
+ f"internal dtype error, unknown kind {dtype.kind!r}"
+ ) from None
+
+
+def __str__(dtype):
+ if dtype.fields is not None:
+ return _struct_str(dtype, include_align=True)
+ elif dtype.subdtype:
+ return _subarray_str(dtype)
+ elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
+ return dtype.str
+ else:
+ return dtype.name
+
+
+def __repr__(dtype):
+ arg_str = _construction_repr(dtype, include_align=False)
+ if dtype.isalignedstruct:
+ arg_str = arg_str + ", align=True"
+ return f"dtype({arg_str})"
+
+
+def _unpack_field(dtype, offset, title=None):
+ """
+ Helper function to normalize the items in dtype.fields.
+
+ Call as:
+
+ dtype, offset, title = _unpack_field(*dtype.fields[name])
+ """
+ return dtype, offset, title
+
+
+def _isunsized(dtype):
+ # PyDataType_ISUNSIZED
+ return dtype.itemsize == 0
+
+
+def _construction_repr(dtype, include_align=False, short=False):
+ """
+ Creates a string repr of the dtype, excluding the 'dtype()' part
+ surrounding the object. This object may be a string, a list, or
+ a dict depending on the nature of the dtype. This
+ is the object passed as the first parameter to the dtype
+ constructor, and if no additional constructor parameters are
+ given, will reproduce the exact memory layout.
+
+ Parameters
+ ----------
+ short : bool
+ If true, this creates a shorter repr using 'kind' and 'itemsize',
+ instead of the longer type name.
+
+ include_align : bool
+ If true, this includes the 'align=True' parameter
+ inside the struct dtype construction dict when needed. Use this flag
+ if you want a proper repr string without the 'dtype()' part around it.
+
+ If false, this does not preserve the
+ 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
+ struct arrays like the regular repr does, because the 'align'
+ flag is not part of first dtype constructor parameter. This
+ mode is intended for a full 'repr', where the 'align=True' is
+ provided as the second parameter.
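+
+    For example (illustrative)::
+
+        >>> _construction_repr(np.dtype('float64'))
+        "'float64'"
+        >>> _construction_repr(np.dtype('>i4'))
+        "'>i4'"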
+ """
+ if dtype.fields is not None:
+ return _struct_str(dtype, include_align=include_align)
+ elif dtype.subdtype:
+ return _subarray_str(dtype)
+ else:
+ return _scalar_str(dtype, short=short)
+
+
+def _scalar_str(dtype, short):
+ byteorder = _byte_order_str(dtype)
+
+ if dtype.type == np.bool:
+ if short:
+ return "'?'"
+ else:
+ return "'bool'"
+
+ elif dtype.type == np.object_:
+ # The object reference may be different sizes on different
+ # platforms, so it should never include the itemsize here.
+ return "'O'"
+
+ elif dtype.type == np.bytes_:
+ if _isunsized(dtype):
+ return "'S'"
+ else:
+ return "'S%d'" % dtype.itemsize
+
+ elif dtype.type == np.str_:
+ if _isunsized(dtype):
+ return f"'{byteorder}U'"
+ else:
+ return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
+
+ elif dtype.type == str:
+ return "'T'"
+
+ elif not type(dtype)._legacy:
+ return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'"
+
+ # unlike the other types, subclasses of void are preserved - but
+ # historically the repr does not actually reveal the subclass
+ elif issubclass(dtype.type, np.void):
+ if _isunsized(dtype):
+ return "'V'"
+ else:
+ return "'V%d'" % dtype.itemsize
+
+ elif dtype.type == np.datetime64:
+ return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'"
+
+ elif dtype.type == np.timedelta64:
+ return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'"
+
+ elif dtype.isbuiltin == 2:
+ return dtype.type.__name__
+
+    elif np.issubdtype(dtype, np.number):
+        # Short repr with endianness, like '<f8'
+        if short or dtype.byteorder not in ('=', '|'):
+            return "'%s%s%d'" % (byteorder, dtype.kind, dtype.itemsize)
+
+        # Longer repr, like 'float64'
+        else:
+            return f"'{_kind_name(dtype)}{8 * dtype.itemsize}'"
+
+    else:
+        raise RuntimeError(
+            "Internal error: NumPy dtype unrecognized type number")
+
+
+def _byte_order_str(dtype):
+    """ Normalize byteorder to '<' or '>' """
+ # hack to obtain the native and swapped byte order characters
+ swapped = np.dtype(int).newbyteorder('S')
+ native = swapped.newbyteorder('S')
+
+ byteorder = dtype.byteorder
+ if byteorder == '=':
+ return native.byteorder
+ if byteorder == 'S':
+ # TODO: this path can never be reached
+ return swapped.byteorder
+ elif byteorder == '|':
+ return ''
+ else:
+ return byteorder
+
+
+def _datetime_metadata_str(dtype):
+ # TODO: this duplicates the C metastr_to_unicode functionality
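+    # e.g. (illustrative): datetime64[s] -> '[s]', datetime64[25s] -> '[25s]',
+    # and the generic unit -> ''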
+ unit, count = np.datetime_data(dtype)
+ if unit == 'generic':
+ return ''
+ elif count == 1:
+ return f'[{unit}]'
+ else:
+ return f'[{count}{unit}]'
+
+
+def _struct_dict_str(dtype, includealignedflag):
+ # unpack the fields dictionary into ls
+ names = dtype.names
+ fld_dtypes = []
+ offsets = []
+ titles = []
+ for name in names:
+ fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
+ fld_dtypes.append(fld_dtype)
+ offsets.append(offset)
+ titles.append(title)
+
+ # Build up a string to make the dictionary
+
+ if np._core.arrayprint._get_legacy_print_mode() <= 121:
+ colon = ":"
+ fieldsep = ","
+ else:
+ colon = ": "
+ fieldsep = ", "
+
+ # First, the names
+ ret = "{'names'%s[" % colon
+ ret += fieldsep.join(repr(name) for name in names)
+
+ # Second, the formats
+ ret += f"], 'formats'{colon}["
+ ret += fieldsep.join(
+ _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
+
+ # Third, the offsets
+ ret += f"], 'offsets'{colon}["
+ ret += fieldsep.join("%d" % offset for offset in offsets)
+
+ # Fourth, the titles
+ if any(title is not None for title in titles):
+ ret += f"], 'titles'{colon}["
+ ret += fieldsep.join(repr(title) for title in titles)
+
+ # Fifth, the itemsize
+ ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)
+
+ if (includealignedflag and dtype.isalignedstruct):
+ # Finally, the aligned flag
+ ret += ", 'aligned'%sTrue}" % colon
+ else:
+ ret += "}"
+
+ return ret
+
+
+def _aligned_offset(offset, alignment):
+ # round up offset:
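+    # e.g. _aligned_offset(5, 4) == 8 and _aligned_offset(8, 4) == 8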
+ return - (-offset // alignment) * alignment
+
+
+def _is_packed(dtype):
+ """
+ Checks whether the structured data type in 'dtype'
+ has a simple layout, where all the fields are in order,
+ and follow each other with no alignment padding.
+
+ When this returns true, the dtype can be reconstructed
+ from a list of the field names and dtypes with no additional
+ dtype parameters.
+
+ Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
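+
+    For example (illustrative)::
+
+        >>> _is_packed(np.dtype([('a', 'i4'), ('b', 'u1')]))
+        True
+        >>> _is_packed(np.dtype({'names': ['a'], 'formats': ['i4'],
+        ...                      'offsets': [4], 'itemsize': 8}))
+        False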
+ """
+ align = dtype.isalignedstruct
+ max_alignment = 1
+ total_offset = 0
+ for name in dtype.names:
+ fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+
+ if align:
+ total_offset = _aligned_offset(total_offset, fld_dtype.alignment)
+ max_alignment = max(max_alignment, fld_dtype.alignment)
+
+ if fld_offset != total_offset:
+ return False
+ total_offset += fld_dtype.itemsize
+
+ if align:
+ total_offset = _aligned_offset(total_offset, max_alignment)
+
+ return total_offset == dtype.itemsize
+
+
+def _struct_list_str(dtype):
+ items = []
+ for name in dtype.names:
+ fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+
+ item = "("
+ if title is not None:
+ item += f"({title!r}, {name!r}), "
+ else:
+ item += f"{name!r}, "
+ # Special case subarray handling here
+ if fld_dtype.subdtype is not None:
+ base, shape = fld_dtype.subdtype
+ item += f"{_construction_repr(base, short=True)}, {shape}"
+ else:
+ item += _construction_repr(fld_dtype, short=True)
+
+ item += ")"
+ items.append(item)
+
+ return "[" + ", ".join(items) + "]"
+
+
+def _struct_str(dtype, include_align):
+ # The list str representation can't include the 'align=' flag,
+ # so if it is requested and the struct has the aligned flag set,
+ # we must use the dict str instead.
+ if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
+ sub = _struct_list_str(dtype)
+
+ else:
+ sub = _struct_dict_str(dtype, include_align)
+
+ # If the data type isn't the default, void, show it
+ if dtype.type != np.void:
+ return f"({dtype.type.__module__}.{dtype.type.__name__}, {sub})"
+ else:
+ return sub
+
+
+def _subarray_str(dtype):
+ base, shape = dtype.subdtype
+ return f"({_construction_repr(base, short=True)}, {shape})"
+
+
+def _name_includes_bit_suffix(dtype):
+ if dtype.type == np.object_:
+ # pointer size varies by system, best to omit it
+ return False
+ elif dtype.type == np.bool:
+ # implied
+ return False
+ elif dtype.type is None:
+ return True
+ elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
+ # unspecified
+ return False
+ else:
+ return True
+
+
+def _name_get(dtype):
+ # provides dtype.name.__get__, documented as returning a "bit name"
+
+ if dtype.isbuiltin == 2:
+ # user dtypes don't promise to do anything special
+ return dtype.type.__name__
+
+ if not type(dtype)._legacy:
+ name = type(dtype).__name__
+
+ elif issubclass(dtype.type, np.void):
+ # historically, void subclasses preserve their name, eg `record64`
+ name = dtype.type.__name__
+ else:
+ name = _kind_name(dtype)
+
+ # append bit counts
+ if _name_includes_bit_suffix(dtype):
+ name += f"{dtype.itemsize * 8}"
+
+ # append metadata to datetimes
+ if dtype.type in (np.datetime64, np.timedelta64):
+ name += _datetime_metadata_str(dtype)
+
+ return name
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.pyi
new file mode 100644
index 00000000..6cdd77b2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype.pyi
@@ -0,0 +1,58 @@
+from typing import Final, TypeAlias, TypedDict, overload, type_check_only
+from typing import Literal as L
+
+from typing_extensions import ReadOnly, TypeVar
+
+import numpy as np
+
+###
+
+_T = TypeVar("_T")
+
+_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"]
+
+@type_check_only
+class _KindToStemType(TypedDict):
+ u: ReadOnly[L["uint"]]
+ i: ReadOnly[L["int"]]
+ c: ReadOnly[L["complex"]]
+ f: ReadOnly[L["float"]]
+ b: ReadOnly[L["bool"]]
+ V: ReadOnly[L["void"]]
+ O: ReadOnly[L["object"]]
+ M: ReadOnly[L["datetime"]]
+ m: ReadOnly[L["timedelta"]]
+ S: ReadOnly[L["bytes"]]
+ U: ReadOnly[L["str"]]
+
+###
+
+_kind_to_stem: Final[_KindToStemType] = ...
+
+#
+def _kind_name(dtype: np.dtype) -> _Name: ...
+def __str__(dtype: np.dtype) -> str: ...
+def __repr__(dtype: np.dtype) -> str: ...
+
+#
+def _isunsized(dtype: np.dtype) -> bool: ...
+def _is_packed(dtype: np.dtype) -> bool: ...
+def _name_includes_bit_suffix(dtype: np.dtype) -> bool: ...
+
+#
+def _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ...
+def _scalar_str(dtype: np.dtype, short: bool) -> str: ...
+def _byte_order_str(dtype: np.dtype) -> str: ...
+def _datetime_metadata_str(dtype: np.dtype) -> str: ...
+def _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ...
+def _struct_list_str(dtype: np.dtype) -> str: ...
+def _struct_str(dtype: np.dtype, include_align: bool) -> str: ...
+def _subarray_str(dtype: np.dtype) -> str: ...
+def _name_get(dtype: np.dtype) -> str: ...
+
+#
+@overload
+def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ...
+@overload
+def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ...
+def _aligned_offset(offset: int, alignment: int) -> int: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.py b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.py
new file mode 100644
index 00000000..4de6df6d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.py
@@ -0,0 +1,120 @@
+"""
+Conversion from ctypes to dtype.
+
+In an ideal world, we could achieve this through the PEP3118 buffer protocol,
+something like::
+
+ def dtype_from_ctypes_type(t):
+ # needed to ensure that the shape of `t` is within memoryview.format
+ class DummyStruct(ctypes.Structure):
+ _fields_ = [('a', t)]
+
+ # empty to avoid memory allocation
+ ctype_0 = (DummyStruct * 0)()
+ mv = memoryview(ctype_0)
+
+ # convert the struct, and slice back out the field
+ return _dtype_from_pep3118(mv.format)['a']
+
+Unfortunately, this fails because:
+
+* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
+* PEP3118 cannot represent unions, but both numpy and ctypes can
+* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
+"""
+
+# We delay-import ctypes for distributions that do not include it.
+# While this module is not used unless the user passes in ctypes
+# members, it is eagerly imported from numpy/_core/__init__.py.
+import numpy as np
+
+
+def _from_ctypes_array(t):
+ return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
+
+
+def _from_ctypes_structure(t):
+ for item in t._fields_:
+ if len(item) > 2:
+ raise TypeError(
+ "ctypes bitfields have no dtype equivalent")
+
+ if hasattr(t, "_pack_"):
+ import ctypes
+ formats = []
+ offsets = []
+ names = []
+ current_offset = 0
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+ # Each type has a default offset, this is platform dependent
+ # for some types.
+ effective_pack = min(t._pack_, ctypes.alignment(ftyp))
+ current_offset = (
+ (current_offset + effective_pack - 1) // effective_pack
+ ) * effective_pack
+ offsets.append(current_offset)
+ current_offset += ctypes.sizeof(ftyp)
+
+ return np.dtype({
+ "formats": formats,
+ "offsets": offsets,
+ "names": names,
+ "itemsize": ctypes.sizeof(t)})
+ else:
+ fields = []
+ for fname, ftyp in t._fields_:
+ fields.append((fname, dtype_from_ctypes_type(ftyp)))
+
+ # by default, ctypes structs are aligned
+ return np.dtype(fields, align=True)
+
+
+def _from_ctypes_scalar(t):
+ """
+ Return the dtype type with endianness included if it's the case
+ """
+ if getattr(t, '__ctype_be__', None) is t:
+ return np.dtype('>' + t._type_)
+ elif getattr(t, '__ctype_le__', None) is t:
+ return np.dtype('<' + t._type_)
+ else:
+ return np.dtype(t._type_)
+
+
+def _from_ctypes_union(t):
+ import ctypes
+ formats = []
+ offsets = []
+ names = []
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+ offsets.append(0) # Union fields are offset to 0
+
+ return np.dtype({
+ "formats": formats,
+ "offsets": offsets,
+ "names": names,
+ "itemsize": ctypes.sizeof(t)})
+
+
+def dtype_from_ctypes_type(t):
+ """
+ Construct a dtype object from a ctypes type
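+
+    For example (illustrative)::
+
+        >>> import ctypes
+        >>> dtype_from_ctypes_type(ctypes.c_uint16)
+        dtype('uint16')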
+ """
+ import _ctypes
+ if issubclass(t, _ctypes.Array):
+ return _from_ctypes_array(t)
+ elif issubclass(t, _ctypes._Pointer):
+ raise TypeError("ctypes pointers have no dtype equivalent")
+ elif issubclass(t, _ctypes.Structure):
+ return _from_ctypes_structure(t)
+ elif issubclass(t, _ctypes.Union):
+ return _from_ctypes_union(t)
+ elif isinstance(getattr(t, '_type_', None), str):
+ return _from_ctypes_scalar(t)
+ else:
+ raise NotImplementedError(
+ f"Unknown ctypes type {t.__name__}")
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.pyi
new file mode 100644
index 00000000..69438a2c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_dtype_ctypes.pyi
@@ -0,0 +1,83 @@
+import _ctypes
+import ctypes as ct
+from typing import Any, overload
+
+import numpy as np
+
+#
+@overload
+def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
+
+# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtime, see
+# https://github.com/numpy/numpy/issues/28360
+
+#
+def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ...
+def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ...
+def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ...
+
+# keep in sync with `dtype_from_ctypes_type` (minus the first overload)
+@overload
+def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.py b/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.py
new file mode 100644
index 00000000..73b07d25
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.py
@@ -0,0 +1,162 @@
+"""
+Various richly-typed exceptions, that also help us deal with string formatting
+in python where it's easier.
+
+By putting the formatting in `__str__`, we also avoid paying the cost for
+users who silence the exceptions.
+"""
+
+def _unpack_tuple(tup):
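+    # e.g. (illustrative): _unpack_tuple((1,)) -> 1, _unpack_tuple((1, 2)) -> (1, 2)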
+ if len(tup) == 1:
+ return tup[0]
+ else:
+ return tup
+
+
+def _display_as_base(cls):
+ """
+ A decorator that makes an exception class look like its base.
+
+ We use this to hide subclasses that are implementation details - the user
+ should catch the base type, which is what the traceback will show them.
+
+ Classes decorated with this decorator are subject to removal without a
+ deprecation warning.
+ """
+ assert issubclass(cls, Exception)
+ cls.__name__ = cls.__base__.__name__
+ return cls
+
+
+class UFuncTypeError(TypeError):
+ """ Base class for all ufunc exceptions """
+ def __init__(self, ufunc):
+ self.ufunc = ufunc
+
+
+@_display_as_base
+class _UFuncNoLoopError(UFuncTypeError):
+ """ Thrown when a ufunc loop cannot be found """
+ def __init__(self, ufunc, dtypes):
+ super().__init__(ufunc)
+ self.dtypes = tuple(dtypes)
+
+ def __str__(self):
+ return (
+ f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature "
+ f"matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} "
+ f"-> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}"
+ )
+
+
+@_display_as_base
+class _UFuncBinaryResolutionError(_UFuncNoLoopError):
+ """ Thrown when a binary resolution fails """
+ def __init__(self, ufunc, dtypes):
+ super().__init__(ufunc, dtypes)
+ assert len(self.dtypes) == 2
+
+ def __str__(self):
+ return (
+ "ufunc {!r} cannot use operands with types {!r} and {!r}"
+ ).format(
+ self.ufunc.__name__, *self.dtypes
+ )
+
+
+@_display_as_base
+class _UFuncCastingError(UFuncTypeError):
+ def __init__(self, ufunc, casting, from_, to):
+ super().__init__(ufunc)
+ self.casting = casting
+ self.from_ = from_
+ self.to = to
+
+
+@_display_as_base
+class _UFuncInputCastingError(_UFuncCastingError):
+ """ Thrown when a ufunc input cannot be casted """
+ def __init__(self, ufunc, casting, from_, to, i):
+ super().__init__(ufunc, casting, from_, to)
+ self.in_i = i
+
+ def __str__(self):
+ # only show the number if more than one input exists
+ i_str = f"{self.in_i} " if self.ufunc.nin != 1 else ""
+ return (
+ f"Cannot cast ufunc {self.ufunc.__name__!r} input {i_str}from "
+ f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}"
+ )
+
+
+@_display_as_base
+class _UFuncOutputCastingError(_UFuncCastingError):
+ """ Thrown when a ufunc output cannot be casted """
+ def __init__(self, ufunc, casting, from_, to, i):
+ super().__init__(ufunc, casting, from_, to)
+ self.out_i = i
+
+ def __str__(self):
+ # only show the number if more than one output exists
+ i_str = f"{self.out_i} " if self.ufunc.nout != 1 else ""
+ return (
+ f"Cannot cast ufunc {self.ufunc.__name__!r} output {i_str}from "
+ f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}"
+ )
+
+
+@_display_as_base
+class _ArrayMemoryError(MemoryError):
+ """ Thrown when an array cannot be allocated"""
+ def __init__(self, shape, dtype):
+ self.shape = shape
+ self.dtype = dtype
+
+ @property
+ def _total_size(self):
+ num_bytes = self.dtype.itemsize
+ for dim in self.shape:
+ num_bytes *= dim
+ return num_bytes
+
+ @staticmethod
+ def _size_to_string(num_bytes):
+ """ Convert a number of bytes into a binary size string """
+
+ # https://en.wikipedia.org/wiki/Binary_prefix
+ LOG2_STEP = 10
+ STEP = 1024
+ units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
+
+ unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
+ unit_val = 1 << (unit_i * LOG2_STEP)
+ n_units = num_bytes / unit_val
+ del unit_val
+
+ # ensure we pick a unit that is correct after rounding
+ if round(n_units) == STEP:
+ unit_i += 1
+ n_units /= STEP
+
+ # deal with sizes so large that we don't have units for them
+ if unit_i >= len(units):
+ new_unit_i = len(units) - 1
+ n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
+ unit_i = new_unit_i
+
+ unit_name = units[unit_i]
+ # format with a sensible number of digits
+ if unit_i == 0:
+ # no decimal point on bytes
+ return f'{n_units:.0f} {unit_name}'
+ elif round(n_units) < 1000:
+ # 3 significant figures, if none are dropped to the left of the .
+ return f'{n_units:#.3g} {unit_name}'
+ else:
+ # just give all the digits otherwise
+ return f'{n_units:#.0f} {unit_name}'
+
+ def __str__(self):
+ size_str = self._size_to_string(self._total_size)
+ return (f"Unable to allocate {size_str} for an array with shape "
+ f"{self.shape} and data type {self.dtype}")
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.pyi
new file mode 100644
index 00000000..02637a17
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_exceptions.pyi
@@ -0,0 +1,55 @@
+from collections.abc import Iterable
+from typing import Any, Final, TypeVar, overload
+
+import numpy as np
+from numpy import _CastingKind
+from numpy._utils import set_module as set_module
+
+###
+
+_T = TypeVar("_T")
+_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
+_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
+
+###
+
+class UFuncTypeError(TypeError):
+ ufunc: Final[np.ufunc]
+ def __init__(self, /, ufunc: np.ufunc) -> None: ...
+
+class _UFuncNoLoopError(UFuncTypeError):
+ dtypes: tuple[np.dtype, ...]
+ def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
+
+class _UFuncBinaryResolutionError(_UFuncNoLoopError):
+ dtypes: tuple[np.dtype, np.dtype]
+ def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
+
+class _UFuncCastingError(UFuncTypeError):
+ casting: Final[_CastingKind]
+ from_: Final[np.dtype]
+ to: Final[np.dtype]
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...
+
+class _UFuncInputCastingError(_UFuncCastingError):
+ in_i: Final[int]
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
+
+class _UFuncOutputCastingError(_UFuncCastingError):
+ out_i: Final[int]
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
+
+class _ArrayMemoryError(MemoryError):
+ shape: tuple[int, ...]
+ dtype: np.dtype
+ def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
+ @property
+ def _total_size(self) -> int: ...
+ @staticmethod
+ def _size_to_string(num_bytes: int) -> str: ...
+
+@overload
+def _unpack_tuple(tup: tuple[_T]) -> _T: ...
+@overload
+def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
+def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_internal.py b/.venv/lib/python3.12/site-packages/numpy/_core/_internal.py
new file mode 100644
index 00000000..e00e1b2c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_internal.py
@@ -0,0 +1,958 @@
+"""
+A place for internal code
+
+Some things are more easily handled in Python.
+
+"""
+import ast
+import math
+import re
+import sys
+import warnings
+
+from numpy import _NoValue
+from numpy.exceptions import DTypePromotionError
+
+from .multiarray import StringDType, array, dtype, promote_types
+
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+
+IS_PYPY = sys.implementation.name == 'pypy'
+
+if sys.byteorder == 'little':
+ _nbo = '<'
+else:
+ _nbo = '>'
+
+def _makenames_list(adict, align):
+ allfields = []
+
+ for fname, obj in adict.items():
+ n = len(obj)
+ if not isinstance(obj, tuple) or n not in (2, 3):
+ raise ValueError("entry not a 2- or 3- tuple")
+ if n > 2 and obj[2] == fname:
+ continue
+ num = int(obj[1])
+ if num < 0:
+ raise ValueError("invalid offset.")
+ format = dtype(obj[0], align=align)
+ if n > 2:
+ title = obj[2]
+ else:
+ title = None
+ allfields.append((fname, format, num, title))
+ # sort by offsets
+ allfields.sort(key=lambda x: x[2])
+ names = [x[0] for x in allfields]
+ formats = [x[1] for x in allfields]
+ offsets = [x[2] for x in allfields]
+ titles = [x[3] for x in allfields]
+
+ return names, formats, offsets, titles
+
+# Called in PyArray_DescrConverter function when
+# a dictionary without "names" and "formats"
+# fields is used as a data-type descriptor.
+def _usefields(adict, align):
+ try:
+ names = adict[-1]
+ except KeyError:
+ names = None
+ if names is None:
+ names, formats, offsets, titles = _makenames_list(adict, align)
+ else:
+ formats = []
+ offsets = []
+ titles = []
+ for name in names:
+ res = adict[name]
+ formats.append(res[0])
+ offsets.append(res[1])
+ if len(res) > 2:
+ titles.append(res[2])
+ else:
+ titles.append(None)
+
+ return dtype({"names": names,
+ "formats": formats,
+ "offsets": offsets,
+ "titles": titles}, align)
+
+
+# construct an array_protocol descriptor list
+# from the fields attribute of a descriptor
+# This calls itself recursively but should eventually hit
+# a descriptor that has no fields and then return
+# a simple typestring
+
+def _array_descr(descriptor):
+ fields = descriptor.fields
+ if fields is None:
+ subdtype = descriptor.subdtype
+ if subdtype is None:
+ if descriptor.metadata is None:
+ return descriptor.str
+ else:
+ new = descriptor.metadata.copy()
+ if new:
+ return (descriptor.str, new)
+ else:
+ return descriptor.str
+ else:
+ return (_array_descr(subdtype[0]), subdtype[1])
+
+ names = descriptor.names
+ ordered_fields = [fields[x] + (x,) for x in names]
+ result = []
+ offset = 0
+ for field in ordered_fields:
+ if field[1] > offset:
+ num = field[1] - offset
+ result.append(('', f'|V{num}'))
+ offset += num
+ elif field[1] < offset:
+ raise ValueError(
+ "dtype.descr is not defined for types with overlapping or "
+ "out-of-order fields")
+ if len(field) > 3:
+ name = (field[2], field[3])
+ else:
+ name = field[2]
+ if field[0].subdtype:
+ tup = (name, _array_descr(field[0].subdtype[0]),
+ field[0].subdtype[1])
+ else:
+ tup = (name, _array_descr(field[0]))
+ offset += field[0].itemsize
+ result.append(tup)
+
+ if descriptor.itemsize > offset:
+ num = descriptor.itemsize - offset
+ result.append(('', f'|V{num}'))
+
+ return result
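+
+# e.g. _array_descr(dtype([('x', 'i4'), ('y', 'f8')])) should give
+# [('x', '<i4'), ('y', '<f8')] on a little-endian machine.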
+
+
+# format_re was originally from numarray by J. Todd Miller
+
+format_re = re.compile(r'(?P<order1>[<>|=]?)'
+ r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
+ r'(?P<order2>[<>|=]?)'
+ r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
+sep_re = re.compile(r'\s*,\s*')
+space_re = re.compile(r'\s+$')
+
+# astr is a string (perhaps comma separated)
+
+_convorder = {'=': _nbo}
+
+def _commastring(astr):
+ startindex = 0
+ result = []
+ islist = False
+ while startindex < len(astr):
+ mo = format_re.match(astr, pos=startindex)
+ try:
+ (order1, repeats, order2, dtype) = mo.groups()
+ except (TypeError, AttributeError):
+ raise ValueError(
+ f'format number {len(result) + 1} of "{astr}" is not recognized'
+ ) from None
+ startindex = mo.end()
+ # Separator or ending padding
+ if startindex < len(astr):
+ if space_re.match(astr, pos=startindex):
+ startindex = len(astr)
+ else:
+ mo = sep_re.match(astr, pos=startindex)
+ if not mo:
+ raise ValueError(
+ 'format number %d of "%s" is not recognized' %
+ (len(result) + 1, astr))
+ startindex = mo.end()
+ islist = True
+
+ if order2 == '':
+ order = order1
+ elif order1 == '':
+ order = order2
+ else:
+ order1 = _convorder.get(order1, order1)
+ order2 = _convorder.get(order2, order2)
+ if (order1 != order2):
+ raise ValueError(
+ f'inconsistent byte-order specification {order1} and {order2}')
+ order = order1
+
+ if order in ('|', '=', _nbo):
+ order = ''
+ dtype = order + dtype
+ if repeats == '':
+ newitem = dtype
+ else:
+ if (repeats[0] == "(" and repeats[-1] == ")"
+ and repeats[1:-1].strip() != ""
+ and "," not in repeats):
+ warnings.warn(
+ 'Passing in a parenthesized single number for repeats '
+ 'is deprecated; pass either a single number or indicate '
+ 'a tuple with a comma, like "(2,)".', DeprecationWarning,
+ stacklevel=2)
+ newitem = (dtype, ast.literal_eval(repeats))
+
+ result.append(newitem)
+
+ return result if islist else result[0]
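+
+# Hand-traced examples (a single item is returned unwrapped; a trailing
+# tuple is the repeat shape):
+#
+# >>> _commastring('i4')
+# 'i4'
+# >>> _commastring('i4, (2,3)f8')
+# ['i4', ('f8', (2, 3))]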
+
+class dummy_ctype:
+
+ def __init__(self, cls):
+ self._cls = cls
+
+ def __mul__(self, other):
+ return self
+
+ def __call__(self, *other):
+ return self._cls(other)
+
+ def __eq__(self, other):
+ return self._cls == other._cls
+
+ def __ne__(self, other):
+ return self._cls != other._cls
+
+def _getintp_ctype():
+ val = _getintp_ctype.cache
+ if val is not None:
+ return val
+ if ctypes is None:
+ import numpy as np
+ val = dummy_ctype(np.intp)
+ else:
+ char = dtype('n').char
+ if char == 'i':
+ val = ctypes.c_int
+ elif char == 'l':
+ val = ctypes.c_long
+ elif char == 'q':
+ val = ctypes.c_longlong
+ else:
+ val = ctypes.c_long
+ _getintp_ctype.cache = val
+ return val
+
+
+_getintp_ctype.cache = None
+
+# Used for .ctypes attribute of ndarray
+
+class _missing_ctypes:
+ def cast(self, num, obj):
+ return num.value
+
+ class c_void_p:
+ def __init__(self, ptr):
+ self.value = ptr
+
+
+class _ctypes:
+ def __init__(self, array, ptr=None):
+ self._arr = array
+
+ if ctypes:
+ self._ctypes = ctypes
+ self._data = self._ctypes.c_void_p(ptr)
+ else:
+ # fake a pointer-like object that holds onto the reference
+ self._ctypes = _missing_ctypes()
+ self._data = self._ctypes.c_void_p(ptr)
+ self._data._objects = array
+
+ if self._arr.ndim == 0:
+ self._zerod = True
+ else:
+ self._zerod = False
+
+ def data_as(self, obj):
+ """
+ Return the data pointer cast to a particular c-types object.
+ For example, calling ``self._as_parameter_`` is equivalent to
+ ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
+ the data as a pointer to a ctypes array of floating-point data:
+ ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
+
+ The returned pointer will keep a reference to the array.
+ """
+ # _ctypes.cast function causes a circular reference of self._data in
+ # self._data._objects. Attributes of self._data cannot be released
+ # until gc.collect is called. Make a copy of the pointer first then
+ # let it hold the array reference. This is a workaround to circumvent
+ # the CPython bug https://bugs.python.org/issue12836.
+ ptr = self._ctypes.cast(self._data, obj)
+ ptr._arr = self._arr
+ return ptr
+
+ def shape_as(self, obj):
+ """
+ Return the shape tuple as an array of some other c-types
+ type. For example: ``self.shape_as(ctypes.c_short)``.
+ """
+ if self._zerod:
+ return None
+ return (obj * self._arr.ndim)(*self._arr.shape)
+
+ def strides_as(self, obj):
+ """
+ Return the strides tuple as an array of some other
+ c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
+ """
+ if self._zerod:
+ return None
+ return (obj * self._arr.ndim)(*self._arr.strides)
+
+ @property
+ def data(self):
+ """
+ A pointer to the memory area of the array as a Python integer.
+ This memory area may contain data that is not aligned, or not in
+ correct byte-order. The memory area may not even be writeable.
+ The array flags and data-type of this array should be respected
+ when passing this attribute to arbitrary C-code to avoid trouble
+ that can include Python crashing. User Beware! The value of this
+ attribute is exactly the same as:
+ ``self._array_interface_['data'][0]``.
+
+ Note that unlike ``data_as``, a reference won't be kept to the array:
+ code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
+ pointer to a deallocated array, and should be spelt
+ ``(a + b).ctypes.data_as(ctypes.c_void_p)``
+ """
+ return self._data.value
+
+ @property
+ def shape(self):
+ """
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
+ the basetype is the C-integer corresponding to ``dtype('p')`` on this
+ platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
+ `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
+ the platform. The ctypes array contains the shape of
+ the underlying array.
+ """
+ return self.shape_as(_getintp_ctype())
+
+ @property
+ def strides(self):
+ """
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
+ the basetype is the same as for the shape attribute. This ctypes
+ array contains the strides information from the underlying array.
+ This strides information is important for showing how many bytes
+ must be jumped to get to the next element in the array.
+ """
+ return self.strides_as(_getintp_ctype())
+
+ @property
+ def _as_parameter_(self):
+ """
+ Overrides the ctypes semi-magic method
+
+ Enables `c_func(some_array.ctypes)`
+ """
+ return self.data_as(ctypes.c_void_p)
+
+ # Numpy 1.21.0, 2021-05-18
+
+ def get_data(self):
+ """Deprecated getter for the `_ctypes.data` property.
+
+ .. deprecated:: 1.21
+ """
+ warnings.warn('"get_data" is deprecated. Use "data" instead',
+ DeprecationWarning, stacklevel=2)
+ return self.data
+
+ def get_shape(self):
+ """Deprecated getter for the `_ctypes.shape` property.
+
+ .. deprecated:: 1.21
+ """
+ warnings.warn('"get_shape" is deprecated. Use "shape" instead',
+ DeprecationWarning, stacklevel=2)
+ return self.shape
+
+ def get_strides(self):
+ """Deprecated getter for the `_ctypes.strides` property.
+
+ .. deprecated:: 1.21
+ """
+ warnings.warn('"get_strides" is deprecated. Use "strides" instead',
+ DeprecationWarning, stacklevel=2)
+ return self.strides
+
+ def get_as_parameter(self):
+ """Deprecated getter for the `_ctypes._as_parameter_` property.
+
+ .. deprecated:: 1.21
+ """
+ warnings.warn(
+ '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
+ DeprecationWarning, stacklevel=2,
+ )
+ return self._as_parameter_
+
+
+def _newnames(datatype, order):
+ """
+ Given a datatype and an order object, return a new names tuple, with the
+ order indicated
+ """
+ oldnames = datatype.names
+ nameslist = list(oldnames)
+ if isinstance(order, str):
+ order = [order]
+ seen = set()
+ if isinstance(order, (list, tuple)):
+ for name in order:
+ try:
+ nameslist.remove(name)
+ except ValueError:
+ if name in seen:
+ raise ValueError(f"duplicate field name: {name}") from None
+ else:
+ raise ValueError(f"unknown field name: {name}") from None
+ seen.add(name)
+ return tuple(list(order) + nameslist)
+ raise ValueError(f"unsupported order value: {order}")
+
+def _copy_fields(ary):
+ """Return copy of structured array with padding between fields removed.
+
+ Parameters
+ ----------
+ ary : ndarray
+ Structured array from which to remove padding bytes
+
+ Returns
+ -------
+ ary_copy : ndarray
+ Copy of ary with padding bytes removed
+ """
+ dt = ary.dtype
+ copy_dtype = {'names': dt.names,
+ 'formats': [dt.fields[name][0] for name in dt.names]}
+ return array(ary, dtype=copy_dtype, copy=True)
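+
+# Illustrative sketch with a hypothetical padded dtype:
+#
+# >>> padded = dtype({'names': ['a', 'b'], 'formats': ['i1', 'i4'],
+# ... 'offsets': [0, 8], 'itemsize': 16})
+# >>> _copy_fields(array([(1, 2)], dtype=padded)).dtype.itemsize
+# 5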
+
+def _promote_fields(dt1, dt2):
+ """ Perform type promotion for two structured dtypes.
+
+ Parameters
+ ----------
+ dt1 : structured dtype
+ First dtype.
+ dt2 : structured dtype
+ Second dtype.
+
+ Returns
+ -------
+ out : dtype
+ The promoted dtype
+
+ Notes
+ -----
+ If one of the inputs is aligned, the result will be too. The titles of
+ both descriptors must match (point to the same field).
+ """
+ # Both must be structured and have the same names in the same order
+ if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
+ raise DTypePromotionError(
+ f"field names `{dt1.names}` and `{dt2.names}` mismatch.")
+
+ # if both are identical, we can (maybe!) just return the same dtype.
+ identical = dt1 is dt2
+ new_fields = []
+ for name in dt1.names:
+ field1 = dt1.fields[name]
+ field2 = dt2.fields[name]
+ new_descr = promote_types(field1[0], field2[0])
+ identical = identical and new_descr is field1[0]
+
+ # Check that the titles match (if given):
+ if field1[2:] != field2[2:]:
+ raise DTypePromotionError(
+ f"field titles of field '{name}' mismatch")
+ if len(field1) == 2:
+ new_fields.append((name, new_descr))
+ else:
+ new_fields.append(((field1[2], name), new_descr))
+
+ res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)
+
+ # Might as well preserve identity (and metadata) if the dtype is identical
+ # and the itemsize, offsets are also unmodified. This could probably be
+ # sped up, but also probably just be removed entirely.
+ if identical and res.itemsize == dt1.itemsize:
+ for name in dt1.names:
+ if dt1.fields[name][1] != res.fields[name][1]:
+ return res # the dtype changed.
+ return dt1
+
+ return res
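+
+# e.g. _promote_fields(dtype([('a', 'i4')]), dtype([('a', 'f4')])) should
+# yield dtype([('a', 'f8')]), since int32 and float32 promote to float64.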
+
+
+def _getfield_is_safe(oldtype, newtype, offset):
+ """ Checks safety of getfield for object arrays.
+
+ As in _view_is_safe, we need to check that memory containing objects is not
+ reinterpreted as a non-object datatype and vice versa.
+
+ Parameters
+ ----------
+ oldtype : data-type
+ Data type of the original ndarray.
+ newtype : data-type
+ Data type of the field being accessed by ndarray.getfield
+ offset : int
+ Offset of the field being accessed by ndarray.getfield
+
+ Raises
+ ------
+ TypeError
+ If the field access is invalid
+
+ """
+ if newtype.hasobject or oldtype.hasobject:
+ if offset == 0 and newtype == oldtype:
+ return
+ if oldtype.names is not None:
+ for name in oldtype.names:
+ if (oldtype.fields[name][1] == offset and
+ oldtype.fields[name][0] == newtype):
+ return
+ raise TypeError("Cannot get/set field of an object array")
+ return
+
+def _view_is_safe(oldtype, newtype):
+ """ Checks safety of a view involving object arrays, for example when
+ doing::
+
+ np.zeros(10, dtype=oldtype).view(newtype)
+
+ Parameters
+ ----------
+ oldtype : data-type
+ Data type of original ndarray
+ newtype : data-type
+ Data type of the view
+
+ Raises
+ ------
+ TypeError
+ If the new type is incompatible with the old type.
+
+ """
+
+ # if the types are equivalent, there is no problem.
+ # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
+ if oldtype == newtype:
+ return
+
+ if newtype.hasobject or oldtype.hasobject:
+ raise TypeError("Cannot change data-type for array of references.")
+ return
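+
+# e.g. viewing 'i8' data as 'f8' passes this check, while viewing 'i8'
+# as 'O' (or vice versa) raises TypeError.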
+
+
+# Given a string containing a PEP 3118 format specifier,
+# construct a NumPy dtype
+
+_pep3118_native_map = {
+ '?': '?',
+ 'c': 'S1',
+ 'b': 'b',
+ 'B': 'B',
+ 'h': 'h',
+ 'H': 'H',
+ 'i': 'i',
+ 'I': 'I',
+ 'l': 'l',
+ 'L': 'L',
+ 'q': 'q',
+ 'Q': 'Q',
+ 'e': 'e',
+ 'f': 'f',
+ 'd': 'd',
+ 'g': 'g',
+ 'Zf': 'F',
+ 'Zd': 'D',
+ 'Zg': 'G',
+ 's': 'S',
+ 'w': 'U',
+ 'O': 'O',
+ 'x': 'V', # padding
+}
+_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
+
+_pep3118_standard_map = {
+ '?': '?',
+ 'c': 'S1',
+ 'b': 'b',
+ 'B': 'B',
+ 'h': 'i2',
+ 'H': 'u2',
+ 'i': 'i4',
+ 'I': 'u4',
+ 'l': 'i4',
+ 'L': 'u4',
+ 'q': 'i8',
+ 'Q': 'u8',
+ 'e': 'f2',
+ 'f': 'f',
+ 'd': 'd',
+ 'Zf': 'F',
+ 'Zd': 'D',
+ 's': 'S',
+ 'w': 'U',
+ 'O': 'O',
+ 'x': 'V', # padding
+}
+_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
+
+_pep3118_unsupported_map = {
+ 'u': 'UCS-2 strings',
+ '&': 'pointers',
+ 't': 'bitfields',
+ 'X': 'function pointers',
+}
+
+class _Stream:
+ def __init__(self, s):
+ self.s = s
+ self.byteorder = '@'
+
+ def advance(self, n):
+ res = self.s[:n]
+ self.s = self.s[n:]
+ return res
+
+ def consume(self, c):
+ if self.s[:len(c)] == c:
+ self.advance(len(c))
+ return True
+ return False
+
+ def consume_until(self, c):
+ if callable(c):
+ i = 0
+ while i < len(self.s) and not c(self.s[i]):
+ i = i + 1
+ return self.advance(i)
+ else:
+ i = self.s.index(c)
+ res = self.advance(i)
+ self.advance(len(c))
+ return res
+
+ @property
+ def next(self):
+ return self.s[0]
+
+ def __bool__(self):
+ return bool(self.s)
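+
+# Hand-traced sketch of the tokenizer on a hypothetical spec '10f:x:':
+#
+# >>> s = _Stream('10f:x:')
+# >>> s.consume_until(lambda c: not c.isdigit()) # item size
+# '10'
+# >>> s.advance(1) # type character
+# 'f'
+# >>> s.consume(':'), s.consume_until(':') # field name
+# (True, 'x')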
+
+
+def _dtype_from_pep3118(spec):
+ stream = _Stream(spec)
+ dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
+ return dtype
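+
+# Hand-traced examples:
+#
+# >>> _dtype_from_pep3118('<h') # one-item specs unwrap to a plain dtype
+# dtype('int16')
+# >>> _dtype_from_pep3118('i:x:d:y:').itemsize # '@' aligns 'y' to offset 8
+# 16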
+
+def __dtype_from_pep3118(stream, is_subdtype):
+ field_spec = {
+ 'names': [],
+ 'formats': [],
+ 'offsets': [],
+ 'itemsize': 0
+ }
+ offset = 0
+ common_alignment = 1
+ is_padding = False
+
+ # Parse spec
+ while stream:
+ value = None
+
+ # End of structure, bail out to upper level
+ if stream.consume('}'):
+ break
+
+ # Sub-arrays (1)
+ shape = None
+ if stream.consume('('):
+ shape = stream.consume_until(')')
+ shape = tuple(map(int, shape.split(',')))
+
+ # Byte order
+ if stream.next in ('@', '=', '<', '>', '^', '!'):
+ byteorder = stream.advance(1)
+ if byteorder == '!':
+ byteorder = '>'
+ stream.byteorder = byteorder
+
+ # Byte order characters also control native vs. standard type sizes
+ if stream.byteorder in ('@', '^'):
+ type_map = _pep3118_native_map
+ type_map_chars = _pep3118_native_typechars
+ else:
+ type_map = _pep3118_standard_map
+ type_map_chars = _pep3118_standard_typechars
+
+ # Item sizes
+ itemsize_str = stream.consume_until(lambda c: not c.isdigit())
+ if itemsize_str:
+ itemsize = int(itemsize_str)
+ else:
+ itemsize = 1
+
+ # Data types
+ is_padding = False
+
+ if stream.consume('T{'):
+ value, align = __dtype_from_pep3118(
+ stream, is_subdtype=True)
+ elif stream.next in type_map_chars:
+ if stream.next == 'Z':
+ typechar = stream.advance(2)
+ else:
+ typechar = stream.advance(1)
+
+ is_padding = (typechar == 'x')
+ dtypechar = type_map[typechar]
+ if dtypechar in 'USV':
+ dtypechar += '%d' % itemsize
+ itemsize = 1
+ numpy_byteorder = {'@': '=', '^': '='}.get(
+ stream.byteorder, stream.byteorder)
+ value = dtype(numpy_byteorder + dtypechar)
+ align = value.alignment
+ elif stream.next in _pep3118_unsupported_map:
+ desc = _pep3118_unsupported_map[stream.next]
+ raise NotImplementedError(
+ f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})")
+ else:
+ raise ValueError(
+ f"Unknown PEP 3118 data type specifier {stream.s!r}"
+ )
+
+ #
+ # Native alignment may require padding
+ #
+ # Here we assume that the presence of a '@' character implicitly
+ # implies that the start of the array is *already* aligned.
+ #
+ extra_offset = 0
+ if stream.byteorder == '@':
+ start_padding = (-offset) % align
+ intra_padding = (-value.itemsize) % align
+
+ offset += start_padding
+
+ if intra_padding != 0:
+ if itemsize > 1 or (shape is not None and _prod(shape) > 1):
+ # Inject internal padding to the end of the sub-item
+ value = _add_trailing_padding(value, intra_padding)
+ else:
+ # We can postpone the injection of internal padding,
+ # as the item appears at most once
+ extra_offset += intra_padding
+
+ # Update common alignment
+ common_alignment = _lcm(align, common_alignment)
+
+ # Convert itemsize to sub-array
+ if itemsize != 1:
+ value = dtype((value, (itemsize,)))
+
+ # Sub-arrays (2)
+ if shape is not None:
+ value = dtype((value, shape))
+
+ # Field name
+ if stream.consume(':'):
+ name = stream.consume_until(':')
+ else:
+ name = None
+
+ if not (is_padding and name is None):
+ if name is not None and name in field_spec['names']:
+ raise RuntimeError(
+ f"Duplicate field name '{name}' in PEP3118 format"
+ )
+ field_spec['names'].append(name)
+ field_spec['formats'].append(value)
+ field_spec['offsets'].append(offset)
+
+ offset += value.itemsize
+ offset += extra_offset
+
+ field_spec['itemsize'] = offset
+
+ # extra final padding for aligned types
+ if stream.byteorder == '@':
+ field_spec['itemsize'] += (-offset) % common_alignment
+
+ # Check if this was a simple 1-item type, and unwrap it
+ if (field_spec['names'] == [None]
+ and field_spec['offsets'][0] == 0
+ and field_spec['itemsize'] == field_spec['formats'][0].itemsize
+ and not is_subdtype):
+ ret = field_spec['formats'][0]
+ else:
+ _fix_names(field_spec)
+ ret = dtype(field_spec)
+
+ # Finished
+ return ret, common_alignment
+
+def _fix_names(field_spec):
+ """ Replace names which are None with the next unused f%d name """
+ names = field_spec['names']
+ for i, name in enumerate(names):
+ if name is not None:
+ continue
+
+ j = 0
+ while True:
+ name = f'f{j}'
+ if name not in names:
+ break
+ j = j + 1
+ names[i] = name
+
+def _add_trailing_padding(value, padding):
+ """Inject the specified number of padding bytes at the end of a dtype"""
+ if value.fields is None:
+ field_spec = {
+ 'names': ['f0'],
+ 'formats': [value],
+ 'offsets': [0],
+ 'itemsize': value.itemsize
+ }
+ else:
+ fields = value.fields
+ names = value.names
+ field_spec = {
+ 'names': names,
+ 'formats': [fields[name][0] for name in names],
+ 'offsets': [fields[name][1] for name in names],
+ 'itemsize': value.itemsize
+ }
+
+ field_spec['itemsize'] += padding
+ return dtype(field_spec)
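+
+# e.g. _add_trailing_padding(dtype('i4'), 4).itemsize == 8, with the
+# original scalar exposed as field 'f0' at offset 0.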
+
+def _prod(a):
+ p = 1
+ for x in a:
+ p *= x
+ return p
+
+def _gcd(a, b):
+ """Calculate the greatest common divisor of a and b"""
+ if not (math.isfinite(a) and math.isfinite(b)):
+ raise ValueError('Can only find greatest common divisor of '
+ f'finite arguments, found "{a}" and "{b}"')
+ while b:
+ a, b = b, a % b
+ return a
+
+def _lcm(a, b):
+ return a // _gcd(a, b) * b
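+
+# e.g. _prod((2, 3, 4)) == 24, _gcd(8, 12) == 4 and _lcm(8, 12) == 24.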
+
+def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
+ """ Format the error message for when __array_ufunc__ gives up. """
+ args_string = ', '.join([f'{arg!r}' for arg in inputs] +
+ [f'{k}={v!r}'
+ for k, v in kwargs.items()])
+ args = inputs + kwargs.get('out', ())
+ types_string = ', '.join(repr(type(arg).__name__) for arg in args)
+ return ('operand type(s) all returned NotImplemented from '
+ f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}'
+ )
+
+
+def array_function_errmsg_formatter(public_api, types):
+ """ Format the error message for when __array_ufunc__ gives up. """
+ func_name = f'{public_api.__module__}.{public_api.__name__}'
+ return (f"no implementation found for '{func_name}' on types that implement "
+ f'__array_function__: {list(types)}')
+
+
+def _ufunc_doc_signature_formatter(ufunc):
+ """
+ Builds a signature string which resembles PEP 457
+
+ This is used to construct the first line of the docstring
+ """
+
+ # input arguments are simple
+ if ufunc.nin == 1:
+ in_args = 'x'
+ else:
+ in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin))
+
+ # output arguments are both keyword or positional
+ if ufunc.nout == 0:
+ out_args = ', /, out=()'
+ elif ufunc.nout == 1:
+ out_args = ', /, out=None'
+ else:
+ out_args = '[, {positional}], / [, out={default}]'.format(
+ positional=', '.join(
+ f'out{i + 1}' for i in range(ufunc.nout)),
+ default=repr((None,) * ufunc.nout)
+ )
+
+ # keyword only args depend on whether this is a gufunc
+ kwargs = (
+ ", casting='same_kind'"
+ ", order='K'"
+ ", dtype=None"
+ ", subok=True"
+ )
+
+ # NOTE: gufuncs may or may not support the `axis` parameter
+ if ufunc.signature is None:
+ kwargs = f", where=True{kwargs}[, signature]"
+ else:
+ kwargs += "[, signature, axes, axis]"
+
+ # join all the parts together
+ return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})'
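+
+# For a binary ufunc such as np.add (assuming numpy is imported as np),
+# this should produce:
+#
+# >>> _ufunc_doc_signature_formatter(np.add)
+# "add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature])"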
+
+
+def npy_ctypes_check(cls):
+ # determine if a class comes from ctypes, in order to work around
+ # a bug in the buffer protocol for those objects, bpo-10746
+ try:
+ # ctypes classes are new-style, so they have an __mro__. This probably fails
+ # for ctypes classes with multiple inheritance.
+ if IS_PYPY:
+ # (..., _ctypes.basics._CData, Bufferable, object)
+ ctype_base = cls.__mro__[-3]
+ else:
+ # (..., _ctypes._CData, object)
+ ctype_base = cls.__mro__[-2]
+ # right now, they're part of the _ctypes module
+ return '_ctypes' in ctype_base.__module__
+ except Exception:
+ return False
+
+# used to handle the _NoValue default argument for na_object
+# in the C implementation of the __reduce__ method for stringdtype
+def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
+ if na_object is _NoValue:
+ return StringDType(coerce=coerce)
+ return StringDType(coerce=coerce, na_object=na_object)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_internal.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_internal.pyi
new file mode 100644
index 00000000..3038297b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_internal.pyi
@@ -0,0 +1,72 @@
+import ctypes as ct
+import re
+from collections.abc import Callable, Iterable
+from typing import Any, Final, Generic, Self, overload
+
+from typing_extensions import TypeVar, deprecated
+
+import numpy as np
+import numpy.typing as npt
+from numpy.ctypeslib import c_intp
+
+_CastT = TypeVar("_CastT", bound=ct._CanCastTo)
+_T_co = TypeVar("_T_co", covariant=True)
+_CT = TypeVar("_CT", bound=ct._CData)
+_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True)
+
+###
+
+IS_PYPY: Final[bool] = ...
+
+format_re: Final[re.Pattern[str]] = ...
+sep_re: Final[re.Pattern[str]] = ...
+space_re: Final[re.Pattern[str]] = ...
+
+###
+
+# TODO: Let the likes of `shape_as` and `strides_as` return `None`
+# for 0D arrays once we've got shape-support
+
+class _ctypes(Generic[_PT_co]):
+ @overload
+ def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ...
+ @overload
+ def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ...
+
+ #
+ @property
+ def data(self) -> _PT_co: ...
+ @property
+ def shape(self) -> ct.Array[c_intp]: ...
+ @property
+ def strides(self) -> ct.Array[c_intp]: ...
+ @property
+ def _as_parameter_(self) -> ct.c_void_p: ...
+
+ #
+ def data_as(self, /, obj: type[_CastT]) -> _CastT: ...
+ def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...
+ def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...
+
+ #
+ @deprecated('"get_data" is deprecated. Use "data" instead')
+ def get_data(self, /) -> _PT_co: ...
+ @deprecated('"get_shape" is deprecated. Use "shape" instead')
+ def get_shape(self, /) -> ct.Array[c_intp]: ...
+ @deprecated('"get_strides" is deprecated. Use "strides" instead')
+ def get_strides(self, /) -> ct.Array[c_intp]: ...
+ @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead')
+ def get_as_parameter(self, /) -> ct.c_void_p: ...
+
+class dummy_ctype(Generic[_T_co]):
+ _cls: type[_T_co]
+
+ def __init__(self, /, cls: type[_T_co]) -> None: ...
+ def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+ def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+ def __mul__(self, other: object, /) -> Self: ...
+ def __call__(self, /, *other: object) -> _T_co: ...
+
+def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ...
+def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ...
+def npy_ctypes_check(cls: type) -> bool: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_machar.py b/.venv/lib/python3.12/site-packages/numpy/_core/_machar.py
new file mode 100644
index 00000000..b49742a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_machar.py
@@ -0,0 +1,355 @@
+"""
+Machine arithmetic - determine the parameters of the
+floating-point arithmetic system
+
+Author: Pearu Peterson, September 2003
+
+"""
+__all__ = ['MachAr']
+
+from ._ufunc_config import errstate
+from .fromnumeric import any
+
+# Need to speed this up...especially for longdouble
+
+# Deprecated 2021-10-20, NumPy 1.22
+class MachAr:
+ """
+ Diagnosing machine parameters.
+
+ Attributes
+ ----------
+ ibeta : int
+ Radix in which numbers are represented.
+ it : int
+ Number of base-`ibeta` digits in the floating point mantissa M.
+ machep : int
+ Exponent of the smallest (most negative) power of `ibeta` that,
+ added to 1.0, gives something different from 1.0
+ eps : float
+ Floating-point number ``beta**machep`` (floating point precision)
+ negep : int
+ Exponent of the smallest power of `ibeta` that, subtracted
+ from 1.0, gives something different from 1.0.
+ epsneg : float
+ Floating-point number ``beta**negep``.
+ iexp : int
+ Number of bits in the exponent (including its sign and bias).
+ minexp : int
+ Smallest (most negative) power of `ibeta` consistent with there
+ being no leading zeros in the mantissa.
+ xmin : float
+ Floating-point number ``beta**minexp`` (the smallest [in
+ magnitude] positive floating point number with full precision).
+ maxexp : int
+ Smallest (positive) power of `ibeta` that causes overflow.
+ xmax : float
+ ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
+ usable floating value).
+ irnd : int
+ In ``range(6)``, information on the kind of rounding done during
+ addition, and on how underflow is handled.
+ ngrd : int
+ Number of 'guard digits' used when truncating the product
+ of two mantissas to fit the representation.
+ epsilon : float
+ Same as `eps`.
+ tiny : float
+ An alias for `smallest_normal`, kept for backwards compatibility.
+ huge : float
+ Same as `xmax`.
+ precision : float
+ ``int(-log10(eps))``
+ resolution : float
+ ``10**(-precision)``
+ smallest_normal : float
+ The smallest positive floating point number with 1 as leading bit in
+ the mantissa following IEEE-754. Same as `xmin`.
+ smallest_subnormal : float
+ The smallest positive floating point number with 0 as leading bit in
+ the mantissa following IEEE-754.
+
+ Parameters
+ ----------
+ float_conv : function, optional
+ Function that converts an integer or integer array to a float
+ or float array. Default is `float`.
+ int_conv : function, optional
+ Function that converts a float or float array to an integer or
+ integer array. Default is `int`.
+ float_to_float : function, optional
+ Function that converts a float array to float. Default is `float`.
+ Note that this does not seem to do anything useful in the current
+ implementation.
+ float_to_str : function, optional
+ Function that converts a single float to a string. Default is
+ ``lambda v:'%24.16e' %v``.
+ title : str, optional
+ Title that is printed in the string representation of `MachAr`.
+
+ See Also
+ --------
+ finfo : Machine limits for floating point types.
+ iinfo : Machine limits for integer types.
+
+ References
+ ----------
+ .. [1] Press, Teukolsky, Vetterling and Flannery,
+ "Numerical Recipes in C++," 2nd ed,
+ Cambridge University Press, 2002, p. 31.
+
+ """
+
+ def __init__(self, float_conv=float, int_conv=int,
+ float_to_float=float,
+ float_to_str=lambda v: f'{v:24.16e}',
+ title='Python floating point number'):
+ """
+
+ float_conv - convert integer to float (array)
+ int_conv - convert float (array) to integer
+ float_to_float - convert float array to float
+ float_to_str - convert array float to str
+ title - description of used floating point numbers
+
+ """
+ # We ignore all errors here because we are purposely triggering
+ # underflow to detect the properties of the running arch.
+ with errstate(under='ignore'):
+ self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
+
+ def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
+ max_iterN = 10000
+ msg = "Did not converge after %d tries with %s"
+ one = float_conv(1)
+ two = one + one
+ zero = one - one
+
+ # Do we really need to do this? Aren't they 2 and 2.0?
+ # Determine ibeta and beta
+ a = one
+ for _ in range(max_iterN):
+ a = a + a
+ temp = a + one
+ temp1 = temp - a
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ b = one
+ for _ in range(max_iterN):
+ b = b + b
+ temp = a + b
+ itemp = int_conv(temp - a)
+ if any(itemp != 0):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ ibeta = itemp
+ beta = float_conv(ibeta)
+
+ # Determine it and irnd
+ it = -1
+ b = one
+ for _ in range(max_iterN):
+ it = it + 1
+ b = b * beta
+ temp = b + one
+ temp1 = temp - b
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+
+ betah = beta / two
+ a = one
+ for _ in range(max_iterN):
+ a = a + a
+ temp = a + one
+ temp1 = temp - a
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ temp = a + betah
+ irnd = 0
+ if any(temp - a != zero):
+ irnd = 1
+ tempa = a + beta
+ temp = tempa + betah
+ if irnd == 0 and any(temp - tempa != zero):
+ irnd = 2
+
+ # Determine negep and epsneg
+ negep = it + 3
+ betain = one / beta
+ a = one
+ for i in range(negep):
+ a = a * betain
+ b = a
+ for _ in range(max_iterN):
+ temp = one - a
+ if any(temp - one != zero):
+ break
+ a = a * beta
+ negep = negep - 1
+ # Prevent infinite loop on PPC with gcc 4.0:
+ if negep < 0:
+ raise RuntimeError("could not determine machine tolerance "
+ "for 'negep', locals() -> %s" % (locals()))
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ negep = -negep
+ epsneg = a
+
+ # Determine machep and eps
+ machep = - it - 3
+ a = b
+
+ for _ in range(max_iterN):
+ temp = one + a
+ if any(temp - one != zero):
+ break
+ a = a * beta
+ machep = machep + 1
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ eps = a
+
+ # Determine ngrd
+ ngrd = 0
+ temp = one + eps
+ if irnd == 0 and any(temp * one - one != zero):
+ ngrd = 1
+
+ # Determine iexp
+ i = 0
+ k = 1
+ z = betain
+ t = one + eps
+ nxres = 0
+ for _ in range(max_iterN):
+ y = z
+ z = y * y
+ a = z * one # Check here for underflow
+ temp = z * t
+ if any(a + a == zero) or any(abs(z) >= y):
+ break
+ temp1 = temp * betain
+ if any(temp1 * beta == z):
+ break
+ i = i + 1
+ k = k + k
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ if ibeta != 10:
+ iexp = i + 1
+ mx = k + k
+ else:
+ iexp = 2
+ iz = ibeta
+ while k >= iz:
+ iz = iz * ibeta
+ iexp = iexp + 1
+ mx = iz + iz - 1
+
+ # Determine minexp and xmin
+ for _ in range(max_iterN):
+ xmin = y
+ y = y * betain
+ a = y * one
+ temp = y * t
+ if any((a + a) != zero) and any(abs(y) < xmin):
+ k = k + 1
+ temp1 = temp * betain
+ if any(temp1 * beta == y) and any(temp != y):
+ nxres = 3
+ xmin = y
+ break
+ else:
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ minexp = -k
+
+ # Determine maxexp, xmax
+ if mx <= k + k - 3 and ibeta != 10:
+ mx = mx + mx
+ iexp = iexp + 1
+ maxexp = mx + minexp
+ irnd = irnd + nxres
+ if irnd >= 2:
+ maxexp = maxexp - 2
+ i = maxexp + minexp
+ if ibeta == 2 and not i:
+ maxexp = maxexp - 1
+ if i > 20:
+ maxexp = maxexp - 1
+ if any(a != y):
+ maxexp = maxexp - 2
+ xmax = one - epsneg
+ if any(xmax * one != xmax):
+ xmax = one - beta * epsneg
+ xmax = xmax / (xmin * beta * beta * beta)
+ i = maxexp + minexp + 3
+ for j in range(i):
+ if ibeta == 2:
+ xmax = xmax + xmax
+ else:
+ xmax = xmax * beta
+
+ smallest_subnormal = abs(xmin / beta ** (it))
+
+ self.ibeta = ibeta
+ self.it = it
+ self.negep = negep
+ self.epsneg = float_to_float(epsneg)
+ self._str_epsneg = float_to_str(epsneg)
+ self.machep = machep
+ self.eps = float_to_float(eps)
+ self._str_eps = float_to_str(eps)
+ self.ngrd = ngrd
+ self.iexp = iexp
+ self.minexp = minexp
+ self.xmin = float_to_float(xmin)
+ self._str_xmin = float_to_str(xmin)
+ self.maxexp = maxexp
+ self.xmax = float_to_float(xmax)
+ self._str_xmax = float_to_str(xmax)
+ self.irnd = irnd
+
+ self.title = title
+ # Commonly used parameters
+ self.epsilon = self.eps
+ self.tiny = self.xmin
+ self.huge = self.xmax
+ self.smallest_normal = self.xmin
+ self._str_smallest_normal = float_to_str(self.xmin)
+ self.smallest_subnormal = float_to_float(smallest_subnormal)
+ self._str_smallest_subnormal = float_to_str(smallest_subnormal)
+
+ import math
+ self.precision = int(-math.log10(float_to_float(self.eps)))
+ ten = two + two + two + two + two
+ resolution = ten ** (-self.precision)
+ self.resolution = float_to_float(resolution)
+ self._str_resolution = float_to_str(resolution)
+
+ def __str__(self):
+ fmt = (
+ 'Machine parameters for %(title)s\n'
+ '---------------------------------------------------------------------\n'
+ 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
+ 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
+ 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
+ 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
+ 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
+ 'smallest_normal=%(smallest_normal)s '
+ 'smallest_subnormal=%(smallest_subnormal)s\n'
+ '---------------------------------------------------------------------\n'
+ )
+ return fmt % self.__dict__
+
+
+if __name__ == '__main__':
+ print(MachAr())
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_machar.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_machar.pyi
new file mode 100644
index 00000000..02637a17
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_machar.pyi
@@ -0,0 +1,55 @@
+from collections.abc import Iterable
+from typing import Any, Final, TypeVar, overload
+
+import numpy as np
+from numpy import _CastingKind
+from numpy._utils import set_module as set_module
+
+###
+
+_T = TypeVar("_T")
+_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
+_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
+
+###
+
+class UFuncTypeError(TypeError):
+ ufunc: Final[np.ufunc]
+ def __init__(self, /, ufunc: np.ufunc) -> None: ...
+
+class _UFuncNoLoopError(UFuncTypeError):
+ dtypes: tuple[np.dtype, ...]
+ def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
+
+class _UFuncBinaryResolutionError(_UFuncNoLoopError):
+ dtypes: tuple[np.dtype, np.dtype]
+ def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
+
+class _UFuncCastingError(UFuncTypeError):
+ casting: Final[_CastingKind]
+ from_: Final[np.dtype]
+ to: Final[np.dtype]
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...
+
+class _UFuncInputCastingError(_UFuncCastingError):
+ in_i: Final[int]
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
+
+class _UFuncOutputCastingError(_UFuncCastingError):
+ out_i: Final[int]
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
+
+class _ArrayMemoryError(MemoryError):
+ shape: tuple[int, ...]
+ dtype: np.dtype
+ def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
+ @property
+ def _total_size(self) -> int: ...
+ @staticmethod
+ def _size_to_string(num_bytes: int) -> str: ...
+
+@overload
+def _unpack_tuple(tup: tuple[_T]) -> _T: ...
+@overload
+def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
+def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_methods.py b/.venv/lib/python3.12/site-packages/numpy/_core/_methods.py
new file mode 100644
index 00000000..21ad7900
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_methods.py
@@ -0,0 +1,255 @@
+"""
+Array methods which are called by both the C-code for the method
+and the Python code for the NumPy-namespace function
+
+"""
+import os
+import pickle
+import warnings
+from contextlib import nullcontext
+
+import numpy as np
+from numpy._core import multiarray as mu
+from numpy._core import numerictypes as nt
+from numpy._core import umath as um
+from numpy._core.multiarray import asanyarray
+from numpy._globals import _NoValue
+
+# save those O(100) nanoseconds!
+bool_dt = mu.dtype("bool")
+umr_maximum = um.maximum.reduce
+umr_minimum = um.minimum.reduce
+umr_sum = um.add.reduce
+umr_prod = um.multiply.reduce
+umr_bitwise_count = um.bitwise_count
+umr_any = um.logical_or.reduce
+umr_all = um.logical_and.reduce
+
+# Complex types to -> (2,)float view for fast-path computation in _var()
+_complex_to_float = {
+ nt.dtype(nt.csingle): nt.dtype(nt.single),
+ nt.dtype(nt.cdouble): nt.dtype(nt.double),
+}
+# Special case for Windows: ensure double takes precedence
+if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
+ _complex_to_float.update({
+ nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble),
+ })
+
+# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
+# small reductions
+def _amax(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue, where=True):
+ return umr_maximum(a, axis, None, out, keepdims, initial, where)
+
+def _amin(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue, where=True):
+ return umr_minimum(a, axis, None, out, keepdims, initial, where)
+
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue, where=True):
+ return umr_sum(a, axis, dtype, out, keepdims, initial, where)
+
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue, where=True):
+ return umr_prod(a, axis, dtype, out, keepdims, initial, where)
+
+def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+ # By default, return a boolean for any and all
+ if dtype is None:
+ dtype = bool_dt
+ # Parsing keyword arguments is currently fairly slow, so avoid it for now
+ if where is True:
+ return umr_any(a, axis, dtype, out, keepdims)
+ return umr_any(a, axis, dtype, out, keepdims, where=where)
+
+def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+ # By default, return a boolean for any and all
+ if dtype is None:
+ dtype = bool_dt
+ # Parsing keyword arguments is currently fairly slow, so avoid it for now
+ if where is True:
+ return umr_all(a, axis, dtype, out, keepdims)
+ return umr_all(a, axis, dtype, out, keepdims, where=where)
+
+def _count_reduce_items(arr, axis, keepdims=False, where=True):
+ # fast-path for the default case
+ if where is True:
+ # no boolean mask given, calculate items according to axis
+ if axis is None:
+ axis = tuple(range(arr.ndim))
+ elif not isinstance(axis, tuple):
+ axis = (axis,)
+ items = 1
+ for ax in axis:
+ items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
+ items = nt.intp(items)
+ else:
+ # TODO: Optimize case when `where` is broadcast along a non-reduction
+ # axis and a full sum does more work than needed.
+
+ # guarded to protect circular imports
+ from numpy.lib._stride_tricks_impl import broadcast_to
+ # count True values in (potentially broadcasted) boolean mask
+ items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
+ keepdims)
+ return items
+
+def _clip(a, min=None, max=None, out=None, **kwargs):
+ if a.dtype.kind in "iu":
+ # If min/max is a Python integer, deal with out-of-bound values here.
+ # (This enforces NEP 50 rules as no value based promotion is done.)
+ if type(min) is int and min <= np.iinfo(a.dtype).min:
+ min = None
+ if type(max) is int and max >= np.iinfo(a.dtype).max:
+ max = None
+
+ if min is None and max is None:
+ # return identity
+ return um.positive(a, out=out, **kwargs)
+ elif min is None:
+ return um.minimum(a, max, out=out, **kwargs)
+ elif max is None:
+ return um.maximum(a, min, out=out, **kwargs)
+ else:
+ return um.clip(a, min, max, out=out, **kwargs)
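+
+# Hand-traced NEP 50 example: a Python int bound the dtype cannot
+# represent is dropped before clipping.
+#
+# >>> _clip(np.arange(5, dtype=np.int8), min=-1000, max=3)
+# array([0, 1, 2, 3, 3], dtype=int8)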
+
+def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+ arr = asanyarray(a)
+
+ is_float16_result = False
+
+ rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
+ if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
+ warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
+
+ # Cast bool, unsigned int, and int to float64 by default
+ if dtype is None:
+ if issubclass(arr.dtype.type, (nt.integer, nt.bool)):
+ dtype = mu.dtype('f8')
+ elif issubclass(arr.dtype.type, nt.float16):
+ dtype = mu.dtype('f4')
+ is_float16_result = True
+
+ ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
+ if isinstance(ret, mu.ndarray):
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
+ if is_float16_result and out is None:
+ ret = arr.dtype.type(ret)
+ elif hasattr(ret, 'dtype'):
+ if is_float16_result:
+ ret = arr.dtype.type(ret / rcount)
+ else:
+ ret = ret.dtype.type(ret / rcount)
+ else:
+ ret = ret / rcount
+
+ return ret
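+
+# e.g. _mean(np.arange(4)) should give 1.5, with the integer input
+# accumulated as float64.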
+
+def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
+ where=True, mean=None):
+ arr = asanyarray(a)
+
+ rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
+ # Make this warning show up on top.
+ if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
+ warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
+ stacklevel=2)
+
+ # Cast bool, unsigned int, and int to float64 by default
+ if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)):
+ dtype = mu.dtype('f8')
+
+ if mean is not None:
+ arrmean = mean
+ else:
+ # Compute the mean.
+ # Note that if dtype is not of inexact type then arrmean will
+ # not be either.
+ arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
+ # The shape of rcount has to match arrmean to not change the shape of
+ # out in broadcasting. Otherwise, it cannot be stored back to arrmean.
+ if rcount.ndim == 0:
+ # fast-path for default case when where is True
+ div = rcount
+ else:
+ # matching rcount to arrmean when where is specified as array
+ div = rcount.reshape(arrmean.shape)
+ if isinstance(arrmean, mu.ndarray):
+ arrmean = um.true_divide(arrmean, div, out=arrmean,
+ casting='unsafe', subok=False)
+ elif hasattr(arrmean, "dtype"):
+ arrmean = arrmean.dtype.type(arrmean / rcount)
+ else:
+ arrmean = arrmean / rcount
+
+ # Compute sum of squared deviations from mean
+ # Note that x may not be inexact and that we need it to be an array,
+ # not a scalar.
+ x = asanyarray(arr - arrmean)
+
+ if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
+ x = um.multiply(x, x, out=x)
+ # Fast-paths for built-in complex types
+ elif x.dtype in _complex_to_float:
+ xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
+ um.multiply(xv, xv, out=xv)
+ x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
+ # Most general case; includes handling object arrays containing imaginary
+ # numbers and complex types with non-native byteorder
+ else:
+ x = um.multiply(x, um.conjugate(x), out=x).real
+
+ ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)
+
+ # Compute degrees of freedom and make sure it is not negative.
+ rcount = um.maximum(rcount - ddof, 0)
+
+ # divide by degrees of freedom
+ if isinstance(ret, mu.ndarray):
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
+ elif hasattr(ret, 'dtype'):
+ ret = ret.dtype.type(ret / rcount)
+ else:
+ ret = ret / rcount
+
+ return ret
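+
+# Sanity sketch: the sum of squared deviations divided by
+# max(n - ddof, 0), so _var(np.array([1., 2., 3., 4.]), ddof=1) should
+# give 5.0 / 3 (about 1.6667).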
+
+def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
+ where=True, mean=None):
+ ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ keepdims=keepdims, where=where, mean=mean)
+
+ if isinstance(ret, mu.ndarray):
+ ret = um.sqrt(ret, out=ret)
+ elif hasattr(ret, 'dtype'):
+ ret = ret.dtype.type(um.sqrt(ret))
+ else:
+ ret = um.sqrt(ret)
+
+ return ret
+
+def _ptp(a, axis=None, out=None, keepdims=False):
+ return um.subtract(
+ umr_maximum(a, axis, None, out, keepdims),
+ umr_minimum(a, axis, None, None, keepdims),
+ out
+ )
+
+def _dump(self, file, protocol=2):
+ if hasattr(file, 'write'):
+ ctx = nullcontext(file)
+ else:
+ ctx = open(os.fspath(file), "wb")
+ with ctx as f:
+ pickle.dump(self, f, protocol=protocol)
+
+def _dumps(self, protocol=2):
+ return pickle.dumps(self, protocol=protocol)
+
+def _bitwise_count(a, out=None, *, where=True, casting='same_kind',
+ order='K', dtype=None, subok=True):
+ return umr_bitwise_count(a, out, where=where, casting=casting,
+ order=order, dtype=dtype, subok=subok)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_methods.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_methods.pyi
new file mode 100644
index 00000000..3c80683f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_methods.pyi
@@ -0,0 +1,22 @@
+from collections.abc import Callable
+from typing import Any, Concatenate, TypeAlias
+
+import numpy as np
+
+from . import _exceptions as _exceptions
+
+###
+
+_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any]
+
+###
+
+bool_dt: np.dtype[np.bool] = ...
+umr_maximum: _Reduce2 = ...
+umr_minimum: _Reduce2 = ...
+umr_sum: _Reduce2 = ...
+umr_prod: _Reduce2 = ...
+umr_bitwise_count = np.bitwise_count
+umr_any: _Reduce2 = ...
+umr_all: _Reduce2 = ...
+_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 00000000..ce78182e
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 00000000..45073346
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 00000000..bf24fe0d
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 00000000..b9a77173
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 00000000..54bbb837
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_simd.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_simd.pyi
new file mode 100644
index 00000000..70bb7077
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_simd.pyi
@@ -0,0 +1,25 @@
+from types import ModuleType
+from typing import TypedDict, type_check_only
+
+# NOTE: these 5 are only defined on systems with an x86 processor
+SSE42: ModuleType | None = ...
+FMA3: ModuleType | None = ...
+AVX2: ModuleType | None = ...
+AVX512F: ModuleType | None = ...
+AVX512_SKX: ModuleType | None = ...
+
+baseline: ModuleType | None = ...
+
+@type_check_only
+class SimdTargets(TypedDict):
+ SSE42: ModuleType | None
+ AVX2: ModuleType | None
+ FMA3: ModuleType | None
+ AVX512F: ModuleType | None
+ AVX512_SKX: ModuleType | None
+ baseline: ModuleType | None
+
+targets: SimdTargets = ...
+
+def clear_floatstatus() -> None: ...
+def get_floatstatus() -> int: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.py b/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.py
new file mode 100644
index 00000000..87085d41
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.py
@@ -0,0 +1,100 @@
+"""
+String-handling utilities to avoid locale-dependence.
+
+Used primarily to generate type name aliases.
+"""
+# "import string" is costly to import!
+# Construct the translation tables directly
+# "A" = chr(65), "a" = chr(97)
+_all_chars = tuple(map(chr, range(256)))
+_ascii_upper = _all_chars[65:65 + 26]
+_ascii_lower = _all_chars[97:97 + 26]
+LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:]
+UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:]
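+
+# str.translate indexes these 256-tuples by code point, so e.g.
+# 'Im'.translate(LOWER_TABLE) == 'im' in any locale, while code points
+# above 255 pass through unchanged.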
+
+
+def english_lower(s):
+ """ Apply English case rules to convert ASCII strings to all lower case.
+
+ This is an internal utility function to replace calls to str.lower() such
+ that we can avoid changing behavior with changing locales. In particular,
+ Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+ both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ lowered : str
+
+ Examples
+ --------
+ >>> from numpy._core.numerictypes import english_lower
+ >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
+ >>> english_lower('')
+ ''
+ """
+ lowered = s.translate(LOWER_TABLE)
+ return lowered
+
+
+def english_upper(s):
+ """ Apply English case rules to convert ASCII strings to all upper case.
+
+ This is an internal utility function to replace calls to str.upper() such
+ that we can avoid changing behavior with changing locales. In particular,
+ Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+ both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ uppered : str
+
+ Examples
+ --------
+ >>> from numpy._core.numerictypes import english_upper
+ >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
+ >>> english_upper('')
+ ''
+ """
+ uppered = s.translate(UPPER_TABLE)
+ return uppered
+
+
+def english_capitalize(s):
+ """ Apply English case rules to convert the first character of an ASCII
+ string to upper case.
+
+ This is an internal utility function to replace calls to str.capitalize()
+ such that we can avoid changing behavior with changing locales.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ capitalized : str
+
+ Examples
+ --------
+ >>> from numpy._core.numerictypes import english_capitalize
+ >>> english_capitalize('int8')
+ 'Int8'
+ >>> english_capitalize('Int8')
+ 'Int8'
+ >>> english_capitalize('')
+ ''
+ """
+ if s:
+ return english_upper(s[0]) + s[1:]
+ else:
+ return s
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.pyi
new file mode 100644
index 00000000..6a85832b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_string_helpers.pyi
@@ -0,0 +1,12 @@
+from typing import Final
+
+_all_chars: Final[tuple[str, ...]] = ...
+_ascii_upper: Final[tuple[str, ...]] = ...
+_ascii_lower: Final[tuple[str, ...]] = ...
+
+LOWER_TABLE: Final[tuple[str, ...]] = ...
+UPPER_TABLE: Final[tuple[str, ...]] = ...
+
+def english_lower(s: str) -> str: ...
+def english_upper(s: str) -> str: ...
+def english_capitalize(s: str) -> str: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 00000000..10747a36
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.py b/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.py
new file mode 100644
index 00000000..de6c3095
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.py
@@ -0,0 +1,119 @@
+"""
+Due to compatibility, numpy has a very large number of different naming
+conventions for the scalar types (those subclassing from `numpy.generic`).
+This file produces a convoluted set of dictionaries mapping names to types,
+and sometimes other mappings too.
+
+.. data:: allTypes
+ A dictionary of names to types that will be exposed as attributes through
+ ``np._core.numerictypes.*``
+
+.. data:: sctypeDict
+ Similar to `allTypes`, but maps a broader set of aliases to their types.
+
+.. data:: sctypes
+ A dictionary keyed by a "type group" string, providing a list of types
+ under that group.
+
+"""
+
+import numpy._core.multiarray as ma
+from numpy._core.multiarray import dtype, typeinfo
+
+######################################
+# Building `sctypeDict` and `allTypes`
+######################################
+
+sctypeDict = {}
+allTypes = {}
+c_names_dict = {}
+
+_abstract_type_names = {
+ "generic", "integer", "inexact", "floating", "number",
+ "flexible", "character", "complexfloating", "unsignedinteger",
+ "signedinteger"
+}
+
+for _abstract_type_name in _abstract_type_names:
+ allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name)
+
+for k, v in typeinfo.items():
+ if k.startswith("NPY_") and v not in c_names_dict:
+ c_names_dict[k[4:]] = v
+ else:
+ concrete_type = v.type
+ allTypes[k] = concrete_type
+ sctypeDict[k] = concrete_type
+
+_aliases = {
+ "double": "float64",
+ "cdouble": "complex128",
+ "single": "float32",
+ "csingle": "complex64",
+ "half": "float16",
+ "bool_": "bool",
+ # Default integer:
+ "int_": "intp",
+ "uint": "uintp",
+}
+
+for k, v in _aliases.items():
+ sctypeDict[k] = allTypes[v]
+ allTypes[k] = allTypes[v]
+
+# extra aliases are added only to `sctypeDict`
+# to support dtype name access, such as `np.dtype("float")`
+_extra_aliases = {
+ "float": "float64",
+ "complex": "complex128",
+ "object": "object_",
+ "bytes": "bytes_",
+ "a": "bytes_",
+ "int": "int_",
+ "str": "str_",
+ "unicode": "str_",
+}
+
+for k, v in _extra_aliases.items():
+ sctypeDict[k] = allTypes[v]
+
+# include extended precision sized aliases
+for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]:
+ longdouble_type: type = allTypes[full_name]
+
+ bits: int = dtype(longdouble_type).itemsize * 8
+ base_name: str = "complex" if is_complex else "float"
+ extended_prec_name: str = f"{base_name}{bits}"
+ if extended_prec_name not in allTypes:
+ sctypeDict[extended_prec_name] = longdouble_type
+ allTypes[extended_prec_name] = longdouble_type
+
+
+####################
+# Building `sctypes`
+####################
+
+sctypes = {"int": set(), "uint": set(), "float": set(),
+ "complex": set(), "others": set()}
+
+for type_info in typeinfo.values():
+ if type_info.kind in ["M", "m"]: # exclude timedelta and datetime
+ continue
+
+ concrete_type = type_info.type
+
+ # find proper group for each concrete type
+ for type_group, abstract_type in [
+ ("int", ma.signedinteger), ("uint", ma.unsignedinteger),
+ ("float", ma.floating), ("complex", ma.complexfloating),
+ ("others", ma.generic)
+ ]:
+ if issubclass(concrete_type, abstract_type):
+ sctypes[type_group].add(concrete_type)
+ break
+
+# sort sctype groups by bitsize
+for sctype_key in sctypes.keys():
+ sctype_list = list(sctypes[sctype_key])
+ sctype_list.sort(key=lambda x: dtype(x).itemsize)
+ sctypes[sctype_key] = sctype_list
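A short illustration of what the mappings built above look like at runtime. `_type_aliases` is a private module, so this is for exploration only; exact group contents depend on the platform (for example, the size of `longdouble`).

```python
import numpy as np
from numpy._core import _type_aliases

# Every key in `sctypeDict` is a valid name-based dtype lookup:
assert _type_aliases.sctypeDict["float"] is np.float64
assert np.dtype("float").type is np.float64

# `sctypes` groups the concrete scalar types, sorted by item size:
for group, types in _type_aliases.sctypes.items():
    print(group, [t.__name__ for t in types])
# e.g. int -> ['int8', 'int16', 'int32', 'int64'] on most 64-bit platforms
```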
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.pyi
new file mode 100644
index 00000000..3c9dac7a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_type_aliases.pyi
@@ -0,0 +1,97 @@
+from collections.abc import Collection
+from typing import Final, TypeAlias, TypedDict, type_check_only
+from typing import Literal as L
+
+import numpy as np
+
+__all__ = (
+ "_abstract_type_names",
+ "_aliases",
+ "_extra_aliases",
+ "allTypes",
+ "c_names_dict",
+ "sctypeDict",
+ "sctypes",
+)
+
+sctypeDict: Final[dict[str, type[np.generic]]]
+allTypes: Final[dict[str, type[np.generic]]]
+
+@type_check_only
+class _CNamesDict(TypedDict):
+ BOOL: np.dtype[np.bool]
+ HALF: np.dtype[np.half]
+ FLOAT: np.dtype[np.single]
+ DOUBLE: np.dtype[np.double]
+ LONGDOUBLE: np.dtype[np.longdouble]
+ CFLOAT: np.dtype[np.csingle]
+ CDOUBLE: np.dtype[np.cdouble]
+ CLONGDOUBLE: np.dtype[np.clongdouble]
+ STRING: np.dtype[np.bytes_]
+ UNICODE: np.dtype[np.str_]
+ VOID: np.dtype[np.void]
+ OBJECT: np.dtype[np.object_]
+ DATETIME: np.dtype[np.datetime64]
+ TIMEDELTA: np.dtype[np.timedelta64]
+ BYTE: np.dtype[np.byte]
+ UBYTE: np.dtype[np.ubyte]
+ SHORT: np.dtype[np.short]
+ USHORT: np.dtype[np.ushort]
+ INT: np.dtype[np.intc]
+ UINT: np.dtype[np.uintc]
+ LONG: np.dtype[np.long]
+ ULONG: np.dtype[np.ulong]
+ LONGLONG: np.dtype[np.longlong]
+ ULONGLONG: np.dtype[np.ulonglong]
+
+c_names_dict: Final[_CNamesDict]
+
+_AbstractTypeName: TypeAlias = L[
+ "generic",
+ "flexible",
+ "character",
+ "number",
+ "integer",
+ "inexact",
+ "unsignedinteger",
+ "signedinteger",
+ "floating",
+ "complexfloating",
+]
+_abstract_type_names: Final[set[_AbstractTypeName]]
+
+@type_check_only
+class _AliasesType(TypedDict):
+ double: L["float64"]
+ cdouble: L["complex128"]
+ single: L["float32"]
+ csingle: L["complex64"]
+ half: L["float16"]
+ bool_: L["bool"]
+ int_: L["intp"]
+ uint: L["intp"]
+
+_aliases: Final[_AliasesType]
+
+@type_check_only
+class _ExtraAliasesType(TypedDict):
+ float: L["float64"]
+ complex: L["complex128"]
+ object: L["object_"]
+ bytes: L["bytes_"]
+ a: L["bytes_"]
+ int: L["int_"]
+ str: L["str_"]
+ unicode: L["str_"]
+
+_extra_aliases: Final[_ExtraAliasesType]
+
+@type_check_only
+class _SCTypes(TypedDict):
+ int: Collection[type[np.signedinteger]]
+ uint: Collection[type[np.unsignedinteger]]
+ float: Collection[type[np.floating]]
+ complex: Collection[type[np.complexfloating]]
+ others: Collection[type[np.flexible | np.bool | np.object_]]
+
+sctypes: Final[_SCTypes]
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.py b/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.py
new file mode 100644
index 00000000..24abecd2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.py
@@ -0,0 +1,489 @@
+"""
+Functions for changing global ufunc configuration
+
+This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and
+`_extobj_contextvar` from umath.
+"""
+import functools
+
+from numpy._utils import set_module
+
+from .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj
+
+__all__ = [
+ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
+ "errstate"
+]
+
+
+@set_module('numpy')
+def seterr(all=None, divide=None, over=None, under=None, invalid=None):
+ """
+ Set how floating-point errors are handled.
+
+ Note that operations on integer scalar types (such as `int16`) are
+ handled like floating point, and are affected by these settings.
+
+ Parameters
+ ----------
+ all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Set treatment for all types of floating-point errors at once:
+
+ - ignore: Take no action when the exception occurs.
+ - warn: Print a :exc:`RuntimeWarning` (via the Python `warnings`
+ module).
+ - raise: Raise a :exc:`FloatingPointError`.
+ - call: Call a function specified using the `seterrcall` function.
+ - print: Print a warning directly to ``stdout``.
+ - log: Record error in a Log object specified by `seterrcall`.
+
+ The default is not to change the current behavior.
+ divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for division by zero.
+ over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for floating-point overflow.
+ under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for floating-point underflow.
+ invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for invalid floating-point operation.
+
+ Returns
+ -------
+ old_settings : dict
+ Dictionary containing the old settings.
+
+ See also
+ --------
+ seterrcall : Set a callback function for the 'call' mode.
+ geterr, geterrcall, errstate
+
+ Notes
+ -----
+ The floating-point exceptions are defined in the IEEE 754 standard [1]_:
+
+ - Division by zero: infinite result obtained from finite numbers.
+ - Overflow: result too large to be expressed.
+ - Underflow: result so close to zero that some precision
+ was lost.
+ - Invalid operation: result is not an expressible number, typically
+ indicates that a NaN was produced.
+
+ .. [1] https://en.wikipedia.org/wiki/IEEE_754
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> orig_settings = np.seterr(all='ignore') # seterr to known value
+ >>> np.int16(32000) * np.int16(3)
+ np.int16(30464)
+ >>> np.seterr(over='raise')
+ {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
+ >>> old_settings = np.seterr(all='warn', over='raise')
+ >>> np.int16(32000) * np.int16(3)
+ Traceback (most recent call last):
+ File "", line 1, in
+ FloatingPointError: overflow encountered in scalar multiply
+
+ >>> old_settings = np.seterr(all='print')
+ >>> np.geterr()
+ {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
+ >>> np.int16(32000) * np.int16(3)
+ np.int16(30464)
+ >>> np.seterr(**orig_settings) # restore original
+ {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
+
+ """
+
+ old = _get_extobj_dict()
+ # The errstate doesn't include call and bufsize, so pop them:
+ old.pop("call", None)
+ old.pop("bufsize", None)
+
+ extobj = _make_extobj(
+ all=all, divide=divide, over=over, under=under, invalid=invalid)
+ _extobj_contextvar.set(extobj)
+ return old
+
+
+@set_module('numpy')
+def geterr():
+ """
+ Get the current way of handling floating-point errors.
+
+ Returns
+ -------
+ res : dict
+ A dictionary with keys "divide", "over", "under", and "invalid",
+ whose values are from the strings "ignore", "print", "log", "warn",
+ "raise", and "call". The keys represent possible floating-point
+ exceptions, and the values define how these exceptions are handled.
+
+ See Also
+ --------
+ geterrcall, seterr, seterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.geterr()
+ {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
+ >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP
+ array([nan, 1., 1.])
+ RuntimeWarning: invalid value encountered in divide
+
+ >>> oldsettings = np.seterr(all='warn', invalid='raise')
+ >>> np.geterr()
+ {'divide': 'warn', 'over': 'warn', 'under': 'warn', 'invalid': 'raise'}
+ >>> np.arange(3.) / np.arange(3.)
+ Traceback (most recent call last):
+ ...
+ FloatingPointError: invalid value encountered in divide
+ >>> oldsettings = np.seterr(**oldsettings) # restore original
+
+ """
+ res = _get_extobj_dict()
+ # The "geterr" doesn't include call and bufsize,:
+ res.pop("call", None)
+ res.pop("bufsize", None)
+ return res
+
+
+@set_module('numpy')
+def setbufsize(size):
+ """
+ Set the size of the buffer used in ufuncs.
+
+ .. versionchanged:: 2.0
+ The scope of setting the buffer is tied to the `numpy.errstate`
+ context. Exiting a ``with errstate():`` will also restore the bufsize.
+
+ Parameters
+ ----------
+ size : int
+ Size of buffer.
+
+ Returns
+ -------
+ bufsize : int
+ Previous size of ufunc buffer in bytes.
+
+ Examples
+ --------
+ When exiting a `numpy.errstate` context manager the bufsize is restored:
+
+ >>> import numpy as np
+ >>> with np.errstate():
+ ... np.setbufsize(4096)
+ ... print(np.getbufsize())
+ ...
+ 8192
+ 4096
+ >>> np.getbufsize()
+ 8192
+
+ """
+ old = _get_extobj_dict()["bufsize"]
+ extobj = _make_extobj(bufsize=size)
+ _extobj_contextvar.set(extobj)
+ return old
+
+
+@set_module('numpy')
+def getbufsize():
+ """
+ Return the size of the buffer used in ufuncs.
+
+ Returns
+ -------
+ getbufsize : int
+ Size of ufunc buffer in bytes.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.getbufsize()
+ 8192
+
+ """
+ return _get_extobj_dict()["bufsize"]
+
+
+@set_module('numpy')
+def seterrcall(func):
+ """
+ Set the floating-point error callback function or log object.
+
+ There are two ways to capture floating-point error messages. The first
+ is to set the error-handler to 'call', using `seterr`. Then, set
+ the function to call using this function.
+
+ The second is to set the error-handler to 'log', using `seterr`.
+ Floating-point errors then trigger a call to the 'write' method of
+ the provided object.
+
+ Parameters
+ ----------
+ func : callable f(err, flag) or object with write method
+ Function to call upon floating-point errors ('call'-mode) or
+ object whose 'write' method is used to log such message ('log'-mode).
+
+ The call function takes two arguments. The first is a string describing
+ the type of error (such as "divide by zero", "overflow", "underflow",
+ or "invalid value"), and the second is the status flag. The flag is a
+ byte, whose four least-significant bits indicate the type of error, one
+ of "divide", "over", "under", "invalid"::
+
+ [0 0 0 0 invalid under over divide]
+
+ In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
+
+ If an object is provided, its write method should take one argument,
+ a string.
+
+ Returns
+ -------
+ h : callable, log instance or None
+ The old error handler.
+
+ See Also
+ --------
+ seterr, geterr, geterrcall
+
+ Examples
+ --------
+ Callback upon error:
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+
+ >>> import numpy as np
+
+ >>> orig_handler = np.seterrcall(err_handler)
+ >>> orig_err = np.seterr(all='call')
+
+ >>> np.array([1, 2, 3]) / 0.0
+ Floating point error (divide by zero), with flag 1
+ array([inf, inf, inf])
+
+ >>> np.seterrcall(orig_handler)
+ <function err_handler at 0x...>
+
+ >>> np.seterr(**orig_err)
+ {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}
+
+ Log error message:
+
+ >>> class Log:
+ ... def write(self, msg):
+ ... print("LOG: %s" % msg)
+ ...
+
+ >>> log = Log()
+ >>> saved_handler = np.seterrcall(log)
+ >>> save_err = np.seterr(all='log')
+
+ >>> np.array([1, 2, 3]) / 0.0
+ LOG: Warning: divide by zero encountered in divide
+ array([inf, inf, inf])
+
+ >>> np.seterrcall(orig_handler)
+ <Log object at 0x...>
+
+ >>> np.seterr(**orig_err)
+ {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}
+
+ """
+ old = _get_extobj_dict()["call"]
+ extobj = _make_extobj(call=func)
+ _extobj_contextvar.set(extobj)
+ return old
+
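The bit layout described in the docstring can be decoded directly inside a handler. A hedged sketch (the handler name and messages are illustrative, not part of NumPy):

```python
import numpy as np

def err_handler(err_type, flag):
    # flags = divide + 2*over + 4*under + 8*invalid, i.e. divide is bit 0.
    names = ("divide", "over", "under", "invalid")
    bits = [n for i, n in enumerate(names) if flag & (1 << i)]
    print(f"caught {err_type} (flag={flag}, bits={bits})")

old_handler = np.seterrcall(err_handler)
old_err = np.seterr(all="call")
np.array([1.0]) / 0.0  # prints: caught divide by zero (flag=1, bits=['divide'])
np.seterr(**old_err)
np.seterrcall(old_handler)
```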
+
+@set_module('numpy')
+def geterrcall():
+ """
+ Return the current callback function used on floating-point errors.
+
+ When the error handling for a floating-point error (one of "divide",
+ "over", "under", or "invalid") is set to 'call' or 'log', the function
+ that is called or the log instance that is written to is returned by
+ `geterrcall`. This function or log instance has been set with
+ `seterrcall`.
+
+ Returns
+ -------
+ errobj : callable, log instance or None
+ The current error handler. If no handler was set through `seterrcall`,
+ ``None`` is returned.
+
+ See Also
+ --------
+ seterrcall, seterr, geterr
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.geterrcall() # we did not yet set a handler, returns None
+
+ >>> orig_settings = np.seterr(all='call')
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ >>> old_handler = np.seterrcall(err_handler)
+ >>> np.array([1, 2, 3]) / 0.0
+ Floating point error (divide by zero), with flag 1
+ array([inf, inf, inf])
+
+ >>> cur_handler = np.geterrcall()
+ >>> cur_handler is err_handler
+ True
+ >>> old_settings = np.seterr(**orig_settings) # restore original
+ >>> old_handler = np.seterrcall(None) # restore original
+
+ """
+ return _get_extobj_dict()["call"]
+
+
+class _unspecified:
+ pass
+
+
+_Unspecified = _unspecified()
+
+
+@set_module('numpy')
+class errstate:
+ """
+ errstate(**kwargs)
+
+ Context manager for floating-point error handling.
+
+ Using an instance of `errstate` as a context manager allows statements in
+ that context to execute with a known error handling behavior. Upon entering
+ the context the error handling is set with `seterr` and `seterrcall`, and
+ upon exiting it is reset to what it was before.
+
+ .. versionchanged:: 1.17.0
+ `errstate` is also usable as a function decorator, saving
+ a level of indentation if an entire function is wrapped.
+
+ .. versionchanged:: 2.0
+ `errstate` is now fully thread and asyncio safe, but may not be
+ entered more than once.
+ It is not safe to decorate async functions using ``errstate``.
+
+ Parameters
+ ----------
+ kwargs : {divide, over, under, invalid}
+ Keyword arguments. The valid keywords are the possible floating-point
+ exceptions. Each keyword should have a string value that defines the
+ treatment for the particular error. Possible values are
+ {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
+
+ See Also
+ --------
+ seterr, geterr, seterrcall, geterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> olderr = np.seterr(all='ignore') # Set error handling to known state.
+
+ >>> np.arange(3) / 0.
+ array([nan, inf, inf])
+ >>> with np.errstate(divide='ignore'):
+ ... np.arange(3) / 0.
+ array([nan, inf, inf])
+
+ >>> np.sqrt(-1)
+ np.float64(nan)
+ >>> with np.errstate(invalid='raise'):
+ ... np.sqrt(-1)
+ Traceback (most recent call last):
+ File "", line 2, in
+ FloatingPointError: invalid value encountered in sqrt
+
+ Outside the context the error handling behavior has not changed:
+
+ >>> np.geterr()
+ {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
+ >>> olderr = np.seterr(**olderr) # restore original state
+
+ """
+ __slots__ = (
+ "_all",
+ "_call",
+ "_divide",
+ "_invalid",
+ "_over",
+ "_token",
+ "_under",
+ )
+
+ def __init__(self, *, call=_Unspecified,
+ all=None, divide=None, over=None, under=None, invalid=None):
+ self._token = None
+ self._call = call
+ self._all = all
+ self._divide = divide
+ self._over = over
+ self._under = under
+ self._invalid = invalid
+
+ def __enter__(self):
+ # Note that __call__ duplicates much of this logic
+ if self._token is not None:
+ raise TypeError("Cannot enter `np.errstate` twice.")
+ if self._call is _Unspecified:
+ extobj = _make_extobj(
+ all=self._all, divide=self._divide, over=self._over,
+ under=self._under, invalid=self._invalid)
+ else:
+ extobj = _make_extobj(
+ call=self._call,
+ all=self._all, divide=self._divide, over=self._over,
+ under=self._under, invalid=self._invalid)
+
+ self._token = _extobj_contextvar.set(extobj)
+
+ def __exit__(self, *exc_info):
+ _extobj_contextvar.reset(self._token)
+
+ def __call__(self, func):
+ # We need to customize `__call__` compared to `ContextDecorator`
+ # because we must store the token per-thread so cannot store it on
+ # the instance (we could create a new instance for this).
+ # This duplicates the code from `__enter__`.
+ @functools.wraps(func)
+ def inner(*args, **kwargs):
+ if self._call is _Unspecified:
+ extobj = _make_extobj(
+ all=self._all, divide=self._divide, over=self._over,
+ under=self._under, invalid=self._invalid)
+ else:
+ extobj = _make_extobj(
+ call=self._call,
+ all=self._all, divide=self._divide, over=self._over,
+ under=self._under, invalid=self._invalid)
+
+ _token = _extobj_contextvar.set(extobj)
+ try:
+ # Call the original, decorated, function:
+ return func(*args, **kwargs)
+ finally:
+ _extobj_contextvar.reset(_token)
+
+ return inner
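Both entry points defined above, the context manager and the decorator, can be exercised as follows; `safe_sqrt` is an illustrative name:

```python
import numpy as np

# Context-manager form: the previous error state is restored on exit,
# even when an exception propagates out of the block.
with np.errstate(divide="raise"):
    try:
        np.array([1.0]) / 0.0
    except FloatingPointError as exc:
        print("caught:", exc)

# Decorator form: per `__call__` above, a fresh contextvar token is
# created on every invocation, so the wrapped function is thread-safe.
@np.errstate(invalid="ignore")
def safe_sqrt(x):
    return np.sqrt(x)

print(safe_sqrt(np.array([-1.0, 4.0])))  # [nan  2.]
```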
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.pyi
new file mode 100644
index 00000000..1a661315
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/_ufunc_config.pyi
@@ -0,0 +1,32 @@
+from collections.abc import Callable
+from typing import Any, Literal, TypeAlias, TypedDict, type_check_only
+
+from _typeshed import SupportsWrite
+
+from numpy import errstate as errstate
+
+_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"]
+_ErrFunc: TypeAlias = Callable[[str, int], Any]
+_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str]
+
+@type_check_only
+class _ErrDict(TypedDict):
+ divide: _ErrKind
+ over: _ErrKind
+ under: _ErrKind
+ invalid: _ErrKind
+
+def seterr(
+ all: _ErrKind | None = ...,
+ divide: _ErrKind | None = ...,
+ over: _ErrKind | None = ...,
+ under: _ErrKind | None = ...,
+ invalid: _ErrKind | None = ...,
+) -> _ErrDict: ...
+def geterr() -> _ErrDict: ...
+def setbufsize(size: int) -> int: ...
+def getbufsize() -> int: ...
+def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ...
+def geterrcall() -> _ErrCall | None: ...
+
+# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings`
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so
new file mode 100755
index 00000000..51212e83
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.py b/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.py
new file mode 100644
index 00000000..2a684280
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.py
@@ -0,0 +1,1775 @@
+"""Array printing function
+
+$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
+
+"""
+__all__ = ["array2string", "array_str", "array_repr",
+ "set_printoptions", "get_printoptions", "printoptions",
+ "format_float_positional", "format_float_scientific"]
+__docformat__ = 'restructuredtext'
+
+#
+# Written by Konrad Hinsen
+# last revision: 1996-3-13
+# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
+# and by Perry Greenfield 2000-4-1 for numarray
+# and by Travis Oliphant 2005-8-22 for numpy
+
+
+# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
+# scalars but for different purposes. scalartypes.c.src has str/reprs for when
+# the scalar is printed on its own, while arrayprint.py has strs for when
+# scalars are printed inside an ndarray. Only the latter strs are currently
+# user-customizable.
+
+import functools
+import numbers
+import sys
+
+try:
+ from _thread import get_ident
+except ImportError:
+ from _dummy_thread import get_ident
+
+import contextlib
+import operator
+import warnings
+
+import numpy as np
+
+from . import numerictypes as _nt
+from .fromnumeric import any
+from .multiarray import (
+ array,
+ datetime_as_string,
+ datetime_data,
+ dragon4_positional,
+ dragon4_scientific,
+ ndarray,
+)
+from .numeric import asarray, concatenate, errstate
+from .numerictypes import complex128, flexible, float64, int_
+from .overrides import array_function_dispatch, set_module
+from .printoptions import format_options
+from .umath import absolute, isfinite, isinf, isnat
+
+
+def _make_options_dict(precision=None, threshold=None, edgeitems=None,
+ linewidth=None, suppress=None, nanstr=None, infstr=None,
+ sign=None, formatter=None, floatmode=None, legacy=None,
+ override_repr=None):
+ """
+ Make a dictionary out of the non-None arguments, plus conversion of
+ *legacy* and sanity checks.
+ """
+
+ options = {k: v for k, v in list(locals().items()) if v is not None}
+
+ if suppress is not None:
+ options['suppress'] = bool(suppress)
+
+ modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
+ if floatmode not in modes + [None]:
+ raise ValueError("floatmode option must be one of " +
+ ", ".join(f'"{m}"' for m in modes))
+
+ if sign not in [None, '-', '+', ' ']:
+ raise ValueError("sign option must be one of ' ', '+', or '-'")
+
+ if legacy is False:
+ options['legacy'] = sys.maxsize
+ elif legacy == False: # noqa: E712
+ warnings.warn(
+ f"Passing `legacy={legacy!r}` is deprecated.",
+ FutureWarning, stacklevel=3
+ )
+ options['legacy'] = sys.maxsize
+ elif legacy == '1.13':
+ options['legacy'] = 113
+ elif legacy == '1.21':
+ options['legacy'] = 121
+ elif legacy == '1.25':
+ options['legacy'] = 125
+ elif legacy == '2.1':
+ options['legacy'] = 201
+ elif legacy == '2.2':
+ options['legacy'] = 202
+ elif legacy is None:
+ pass # OK, do nothing.
+ else:
+ warnings.warn(
+ "legacy printing option can currently only be '1.13', '1.21', "
+ "'1.25', '2.1', '2.2' or `False`", stacklevel=3)
+
+ if threshold is not None:
+ # forbid the bad threshold arg suggested by stack overflow, gh-12351
+ if not isinstance(threshold, numbers.Number):
+ raise TypeError("threshold must be numeric")
+ if np.isnan(threshold):
+ raise ValueError("threshold must be non-NAN, try "
+ "sys.maxsize for untruncated representation")
+
+ if precision is not None:
+ # forbid the bad precision arg as suggested by issue #18254
+ try:
+ options['precision'] = operator.index(precision)
+ except TypeError as e:
+ raise TypeError('precision must be an integer') from e
+
+ return options
+
+
+@set_module('numpy')
+def set_printoptions(precision=None, threshold=None, edgeitems=None,
+ linewidth=None, suppress=None, nanstr=None,
+ infstr=None, formatter=None, sign=None, floatmode=None,
+ *, legacy=None, override_repr=None):
+ """
+ Set printing options.
+
+ These options determine the way floating point numbers, arrays and
+ other NumPy objects are displayed.
+
+ Parameters
+ ----------
+ precision : int or None, optional
+ Number of digits of precision for floating point output (default 8).
+ May be None if `floatmode` is not `fixed`, to print as many digits as
+ necessary to uniquely specify the value.
+ threshold : int, optional
+ Total number of array elements which trigger summarization
+ rather than full repr (default 1000).
+ To always use the full repr without summarization, pass `sys.maxsize`.
+ edgeitems : int, optional
+ Number of array items in summary at beginning and end of
+ each dimension (default 3).
+ linewidth : int, optional
+ The number of characters per line for the purpose of inserting
+ line breaks (default 75).
+ suppress : bool, optional
+ If True, always print floating point numbers using fixed point
+ notation, in which case numbers equal to zero in the current precision
+ will print as zero. If False, then scientific notation is used when
+ absolute value of the smallest number is < 1e-4 or the ratio of the
+ maximum absolute value to the minimum is > 1e3. The default is False.
+ nanstr : str, optional
+ String representation of floating point not-a-number (default nan).
+ infstr : str, optional
+ String representation of floating point infinity (default inf).
+ sign : string, either '-', '+', or ' ', optional
+ Controls printing of the sign of floating-point types. If '+', always
+ print the sign of positive values. If ' ', always prints a space
+ (whitespace character) in the sign position of positive values. If
+ '-', omit the sign character of positive values. (default '-')
+
+ .. versionchanged:: 2.0
+ The sign parameter now also applies to integer types;
+ previously it affected only floating-point types.
+
+ formatter : dict of callables, optional
+ If not None, the keys should indicate the type(s) that the respective
+ formatting function applies to. Callables should return a string.
+ Types that are not specified (by their corresponding keys) are handled
+ by the default formatters. Individual types for which a formatter
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'numpystr' : types `numpy.bytes_` and `numpy.str_`
+ - 'object' : `np.object_` arrays
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'numpystr'
+ floatmode : str, optional
+ Controls the interpretation of the `precision` option for
+ floating-point types. Can take the following values
+ (default maxprec_equal):
+
+ * 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ * 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ * 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ * 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
+ legacy : string or `False`, optional
+ If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This
+ approximates numpy 1.13 print output by including a space in the sign
+ position of floats and different behavior for 0d arrays. This also
+ enables 1.21 legacy printing mode (described below).
+
+ If set to the string ``'1.21'`` enables 1.21 legacy printing mode. This
+ approximates numpy 1.21 print output of complex structured dtypes
+ by not inserting spaces after commas that separate fields and after
+ colons.
+
+ If set to ``'1.25'`` approximates printing of 1.25 which mainly means
+ that numeric scalars are printed without their type information, e.g.
+ as ``3.0`` rather than ``np.float64(3.0)``.
+
+ If set to ``'2.1'``, shape information is not given when arrays are
+ summarized (i.e., multiple elements replaced with ``...``).
+
+ If set to ``'2.2'``, the transition to use scientific notation for
+ printing ``np.float16`` and ``np.float32`` types may happen later or
+ not at all for larger values.
+
+ If set to `False`, disables legacy mode.
+
+ Unrecognized strings will be ignored with a warning for forward
+ compatibility.
+
+ .. versionchanged:: 1.22.0
+ .. versionchanged:: 2.2
+
+ override_repr : callable, optional
+ If set, the passed function is used to generate arrays' ``repr``.
+ Other options are ignored.
+
+ See Also
+ --------
+ get_printoptions, printoptions, array2string
+
+ Notes
+ -----
+ `formatter` is always reset with a call to `set_printoptions`.
+
+ Use `printoptions` as a context manager to set the values temporarily.
+
+ Examples
+ --------
+ Floating point precision can be set:
+
+ >>> import numpy as np
+ >>> np.set_printoptions(precision=4)
+ >>> np.array([1.123456789])
+ array([1.1235])
+
+ Long arrays can be summarised:
+
+ >>> np.set_printoptions(threshold=5)
+ >>> np.arange(10)
+ array([0, 1, 2, ..., 7, 8, 9], shape=(10,))
+
+ Small results can be suppressed:
+
+ >>> eps = np.finfo(float).eps
+ >>> x = np.arange(4.)
+ >>> x**2 - (x + eps)**2
+ array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
+ >>> np.set_printoptions(suppress=True)
+ >>> x**2 - (x + eps)**2
+ array([-0., -0., 0., 0.])
+
+ A custom formatter can be used to display array elements as desired:
+
+ >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
+ >>> x = np.arange(3)
+ >>> x
+ array([int: 0, int: -1, int: -2])
+ >>> np.set_printoptions() # formatter gets reset
+ >>> x
+ array([0, 1, 2])
+
+ To put back the default options, you can use:
+
+ >>> np.set_printoptions(edgeitems=3, infstr='inf',
+ ... linewidth=75, nanstr='nan', precision=8,
+ ... suppress=False, threshold=1000, formatter=None)
+
+ Also to temporarily override options, use `printoptions`
+ as a context manager:
+
+ >>> with np.printoptions(precision=2, suppress=True, threshold=5):
+ ... np.linspace(0, 10, 10)
+ array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ], shape=(10,))
+
+ """
+ _set_printoptions(precision, threshold, edgeitems, linewidth, suppress,
+ nanstr, infstr, formatter, sign, floatmode,
+ legacy=legacy, override_repr=override_repr)
+
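The four `floatmode` values documented above are easiest to compare side by side. A small sketch (outputs described approximately; exact padding depends on the array):

```python
import numpy as np

x = np.array([0.5, 0.123456789])
for mode in ("fixed", "unique", "maxprec", "maxprec_equal"):
    with np.printoptions(precision=4, floatmode=mode):
        print(f"{mode:>13}: {x}")
# fixed         -> exactly 4 fractional digits for every element
# unique        -> as many digits as each element needs (precision ignored)
# maxprec       -> at most 4 digits, fewer where that is already unique
# maxprec_equal -> at most 4 digits, the same count for all elements
```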
+
+def _set_printoptions(precision=None, threshold=None, edgeitems=None,
+ linewidth=None, suppress=None, nanstr=None,
+ infstr=None, formatter=None, sign=None, floatmode=None,
+ *, legacy=None, override_repr=None):
+ new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
+ suppress, nanstr, infstr, sign, formatter,
+ floatmode, legacy)
+ # formatter and override_repr are always reset
+ new_opt['formatter'] = formatter
+ new_opt['override_repr'] = override_repr
+
+ updated_opt = format_options.get() | new_opt
+
+ if updated_opt['legacy'] == 113:
+ updated_opt['sign'] = '-'
+
+ return format_options.set(updated_opt)
+
+
+@set_module('numpy')
+def get_printoptions():
+ """
+ Return the current print options.
+
+ Returns
+ -------
+ print_opts : dict
+ Dictionary of current print options with keys
+
+ - precision : int
+ - threshold : int
+ - edgeitems : int
+ - linewidth : int
+ - suppress : bool
+ - nanstr : str
+ - infstr : str
+ - sign : str
+ - formatter : dict of callables
+ - floatmode : str
+ - legacy : str or False
+
+ For a full description of these options, see `set_printoptions`.
+
+ See Also
+ --------
+ set_printoptions, printoptions
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> np.get_printoptions()
+ {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None}
+
+ >>> np.get_printoptions()['linewidth']
+ 75
+ >>> np.set_printoptions(linewidth=100)
+ >>> np.get_printoptions()['linewidth']
+ 100
+
+ """
+ opts = format_options.get().copy()
+ opts['legacy'] = {
+ 113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1',
+ 202: '2.2', sys.maxsize: False,
+ }[opts['legacy']]
+ return opts
+
+
+def _get_legacy_print_mode():
+ """Return the legacy print mode as an int."""
+ return format_options.get()['legacy']
+
+
+@set_module('numpy')
+@contextlib.contextmanager
+def printoptions(*args, **kwargs):
+ """Context manager for setting print options.
+
+ Set print options for the scope of the `with` block, and restore the old
+ options at the end. See `set_printoptions` for the full description of
+ available options.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> from numpy.testing import assert_equal
+ >>> with np.printoptions(precision=2):
+ ... np.array([2.0]) / 3
+ array([0.67])
+
+ The `as`-clause of the `with`-statement gives the current print options:
+
+ >>> with np.printoptions(precision=2) as opts:
+ ... assert_equal(opts, np.get_printoptions())
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ """
+ token = _set_printoptions(*args, **kwargs)
+
+ try:
+ yield get_printoptions()
+ finally:
+ format_options.reset(token)
+
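The token-based save/restore used by `printoptions` above is the standard `contextvars` pattern; a stripped-down sketch with an illustrative variable standing in for `format_options`:

```python
from contextvars import ContextVar

# set() returns a token and reset(token) restores the exact previous
# value, so the pattern nests correctly.
_options: ContextVar[dict] = ContextVar("_options", default={"precision": 8})

token = _options.set({**_options.get(), "precision": 2})
try:
    assert _options.get()["precision"] == 2
finally:
    _options.reset(token)
assert _options.get()["precision"] == 8
```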
+
+def _leading_trailing(a, edgeitems, index=()):
+ """
+ Keep only the N-D corners (leading and trailing edges) of an array.
+
+ Should be passed a base-class ndarray, since it makes no guarantees about
+ preserving subclasses.
+ """
+ axis = len(index)
+ if axis == a.ndim:
+ return a[index]
+
+ if a.shape[axis] > 2 * edgeitems:
+ return concatenate((
+ _leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]),
+ _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
+ ), axis=axis)
+ else:
+ return _leading_trailing(a, edgeitems, index + np.index_exp[:])
+
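Concretely, `_leading_trailing` keeps only the corners of each axis once that axis exceeds ``2 * edgeitems``. It is a private helper, used here for illustration only:

```python
import numpy as np
from numpy._core.arrayprint import _leading_trailing

a = np.arange(36).reshape(6, 6)
# With edgeitems=1, each axis of length 6 (> 2*1) keeps one leading and
# one trailing slice, leaving just the four corner elements.
print(_leading_trailing(a, 1))
# [[ 0  5]
#  [30 35]]
```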
+
+def _object_format(o):
+ """ Object arrays containing lists should be printed unambiguously """
+ if type(o) is list:
+ fmt = 'list({!r})'
+ else:
+ fmt = '{!r}'
+ return fmt.format(o)
+
+def repr_format(x):
+ if isinstance(x, (np.str_, np.bytes_)):
+ return repr(x.item())
+ return repr(x)
+
+def str_format(x):
+ if isinstance(x, (np.str_, np.bytes_)):
+ return str(x.item())
+ return str(x)
+
+def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
+ formatter, **kwargs):
+ # note: extra arguments in kwargs are ignored
+
+ # wrapped in lambdas to avoid taking a code path
+ # with the wrong type of data
+ formatdict = {
+ 'bool': lambda: BoolFormat(data),
+ 'int': lambda: IntegerFormat(data, sign),
+ 'float': lambda: FloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'longfloat': lambda: FloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'complexfloat': lambda: ComplexFloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'longcomplexfloat': lambda: ComplexFloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'datetime': lambda: DatetimeFormat(data, legacy=legacy),
+ 'timedelta': lambda: TimedeltaFormat(data),
+ 'object': lambda: _object_format,
+ 'void': lambda: str_format,
+ 'numpystr': lambda: repr_format}
+
+ # we need to wrap values in `formatter` in a lambda, so that the interface
+ # is the same as the above values.
+ def indirect(x):
+ return lambda: x
+
+ if formatter is not None:
+ fkeys = [k for k in formatter.keys() if formatter[k] is not None]
+ if 'all' in fkeys:
+ for key in formatdict.keys():
+ formatdict[key] = indirect(formatter['all'])
+ if 'int_kind' in fkeys:
+ for key in ['int']:
+ formatdict[key] = indirect(formatter['int_kind'])
+ if 'float_kind' in fkeys:
+ for key in ['float', 'longfloat']:
+ formatdict[key] = indirect(formatter['float_kind'])
+ if 'complex_kind' in fkeys:
+ for key in ['complexfloat', 'longcomplexfloat']:
+ formatdict[key] = indirect(formatter['complex_kind'])
+ if 'str_kind' in fkeys:
+ formatdict['numpystr'] = indirect(formatter['str_kind'])
+ for key in formatdict.keys():
+ if key in fkeys:
+ formatdict[key] = indirect(formatter[key])
+
+ return formatdict
+
+def _get_format_function(data, **options):
+ """
+ find the right formatting function for the dtype_
+ """
+ dtype_ = data.dtype
+ dtypeobj = dtype_.type
+ formatdict = _get_formatdict(data, **options)
+ if dtypeobj is None:
+ return formatdict["numpystr"]()
+ elif issubclass(dtypeobj, _nt.bool):
+ return formatdict['bool']()
+ elif issubclass(dtypeobj, _nt.integer):
+ if issubclass(dtypeobj, _nt.timedelta64):
+ return formatdict['timedelta']()
+ else:
+ return formatdict['int']()
+ elif issubclass(dtypeobj, _nt.floating):
+ if issubclass(dtypeobj, _nt.longdouble):
+ return formatdict['longfloat']()
+ else:
+ return formatdict['float']()
+ elif issubclass(dtypeobj, _nt.complexfloating):
+ if issubclass(dtypeobj, _nt.clongdouble):
+ return formatdict['longcomplexfloat']()
+ else:
+ return formatdict['complexfloat']()
+ elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)):
+ return formatdict['numpystr']()
+ elif issubclass(dtypeobj, _nt.datetime64):
+ return formatdict['datetime']()
+ elif issubclass(dtypeobj, _nt.object_):
+ return formatdict['object']()
+ elif issubclass(dtypeobj, _nt.void):
+ if dtype_.names is not None:
+ return StructuredVoidFormat.from_data(data, **options)
+ else:
+ return formatdict['void']()
+ else:
+ return formatdict['numpystr']()
+
+
+def _recursive_guard(fillvalue='...'):
+ """
+ Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
+
+ Decorates a function such that if it calls itself with the same first
+ argument, it returns `fillvalue` instead of recursing.
+
+ Largely copied from reprlib.recursive_repr
+ """
+
+ def decorating_function(f):
+ repr_running = set()
+
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ key = id(self), get_ident()
+ if key in repr_running:
+ return fillvalue
+ repr_running.add(key)
+ try:
+ return f(self, *args, **kwargs)
+ finally:
+ repr_running.discard(key)
+
+ return wrapper
+
+ return decorating_function
+
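The guard above can be exercised on any self-referential `repr`. A hedged sketch using the private decorator (`Node` is made up for the demo):

```python
from numpy._core.arrayprint import _recursive_guard

class Node:
    def __init__(self):
        self.next = self  # deliberately self-referential

    @_recursive_guard(fillvalue="<...>")
    def __repr__(self):
        return f"Node(next={self.next!r})"

print(Node())  # Node(next=<...>) instead of a RecursionError
```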
+
+# gracefully handle recursive calls, when object arrays contain themselves
+@_recursive_guard()
+def _array2string(a, options, separator=' ', prefix=""):
+ # The formatter __init__s in _get_format_function cannot deal with
+ # subclasses yet, and we also need to avoid recursion issues in
+ # _formatArray with subclasses which return 0d arrays in place of scalars
+ data = asarray(a)
+ if a.shape == ():
+ a = data
+
+ if a.size > options['threshold']:
+ summary_insert = "..."
+ data = _leading_trailing(data, options['edgeitems'])
+ else:
+ summary_insert = ""
+
+ # find the right formatting function for the array
+ format_function = _get_format_function(data, **options)
+
+ # skip over "["
+ next_line_prefix = " "
+ # skip over array(
+ next_line_prefix += " " * len(prefix)
+
+ lst = _formatArray(a, format_function, options['linewidth'],
+ next_line_prefix, separator, options['edgeitems'],
+ summary_insert, options['legacy'])
+ return lst
+
+
+def _array2string_dispatcher(
+ a, max_line_width=None, precision=None,
+ suppress_small=None, separator=None, prefix=None,
+ style=None, formatter=None, threshold=None,
+ edgeitems=None, sign=None, floatmode=None, suffix=None,
+ *, legacy=None):
+ return (a,)
+
+
+@array_function_dispatch(_array2string_dispatcher, module='numpy')
+def array2string(a, max_line_width=None, precision=None,
+ suppress_small=None, separator=' ', prefix="",
+ style=np._NoValue, formatter=None, threshold=None,
+ edgeitems=None, sign=None, floatmode=None, suffix="",
+ *, legacy=None):
+ """
+ Return a string representation of an array.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
+ precision : int or None, optional
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
+ separator : str, optional
+ Inserted between elements.
+ prefix : str, optional
+ suffix : str, optional
+ The length of the prefix and suffix strings are used to respectively
+ align and wrap the output. An array is typically printed as::
+
+ prefix + array2string(a) + suffix
+
+ The output is left-padded by the length of the prefix string, and
+ wrapping is forced at the column ``max_line_width - len(suffix)``.
+ Note that the contents of the prefix and suffix strings are not
+ included in the output.
+ style : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.14.0
+ formatter : dict of callables, optional
+ If not None, the keys should indicate the type(s) that the respective
+ formatting function applies to. Callables should return a string.
+ Types that are not specified (by their corresponding keys) are handled
+ by the default formatters. Individual types for which a formatter
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'void' : type `numpy.void`
+ - 'numpystr' : types `numpy.bytes_` and `numpy.str_`
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'numpystr'
+ threshold : int, optional
+ Total number of array elements which trigger summarization
+ rather than full repr.
+ Defaults to ``numpy.get_printoptions()['threshold']``.
+ edgeitems : int, optional
+ Number of array items in summary at beginning and end of
+ each dimension.
+ Defaults to ``numpy.get_printoptions()['edgeitems']``.
+ sign : string, either '-', '+', or ' ', optional
+ Controls printing of the sign of floating-point types. If '+', always
+ print the sign of positive values. If ' ', always prints a space
+ (whitespace character) in the sign position of positive values. If
+ '-', omit the sign character of positive values.
+ Defaults to ``numpy.get_printoptions()['sign']``.
+
+ .. versionchanged:: 2.0
+ The sign parameter now also applies to integer types;
+ previously it affected only floating-point types.
+
+ floatmode : str, optional
+ Controls the interpretation of the `precision` option for
+ floating-point types.
+ Defaults to ``numpy.get_printoptions()['floatmode']``.
+ Can take the following values:
+
+ - 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ - 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ - 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ - 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
+ legacy : string or `False`, optional
+ If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This
+ approximates numpy 1.13 print output by including a space in the sign
+ position of floats and different behavior for 0d arrays. If set to
+ `False`, disables legacy mode. Unrecognized strings will be ignored
+ with a warning for forward compatibility.
+
+ Returns
+ -------
+ array_str : str
+ String representation of the array.
+
+ Raises
+ ------
+ TypeError
+ if a callable in `formatter` does not return a string.
+
+ See Also
+ --------
+ array_str, array_repr, set_printoptions, get_printoptions
+
+ Notes
+ -----
+ If a formatter is specified for a certain type, the `precision` keyword is
+ ignored for that type.
+
+ This is a very flexible function; `array_repr` and `array_str` use
+ `array2string` internally, so keywords with the same name should work
+ identically in all three functions.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1e-16,1,2,3])
+ >>> np.array2string(x, precision=2, separator=',',
+ ... suppress_small=True)
+ '[0.,1.,2.,3.]'
+
+ >>> x = np.arange(3.)
+ >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
+ '[0.00 1.00 2.00]'
+
+ >>> x = np.arange(3)
+ >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
+ '[0x0 0x1 0x2]'
+
+ """
+
+ overrides = _make_options_dict(precision, threshold, edgeitems,
+ max_line_width, suppress_small, None, None,
+ sign, formatter, floatmode, legacy)
+ options = format_options.get().copy()
+ options.update(overrides)
+
+ if options['legacy'] <= 113:
+ if style is np._NoValue:
+ style = repr
+
+ if a.shape == () and a.dtype.names is None:
+ return style(a.item())
+ elif style is not np._NoValue:
+ # Deprecation 11-9-2017 v1.14
+ warnings.warn("'style' argument is deprecated and no longer functional"
+ " except in 1.13 'legacy' mode",
+ DeprecationWarning, stacklevel=2)
+
+ if options['legacy'] > 113:
+ options['linewidth'] -= len(suffix)
+
+ # treat as a null array if any of shape elements == 0
+ if a.size == 0:
+ return "[]"
+
+ return _array2string(a, options, separator, prefix)
+
+
+def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
+ needs_wrap = len(line) + len(word) > line_width
+ if legacy > 113:
+ # don't wrap lines if it won't help
+ if len(line) <= len(next_line_prefix):
+ needs_wrap = False
+
+ if needs_wrap:
+ s += line.rstrip() + "\n"
+ line = next_line_prefix
+ line += word
+ return s, line
+
+
+def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):
+ """
+ Extends line with nicely formatted (possibly multi-line) string ``word``.
+ """
+ words = word.splitlines()
+ if len(words) == 1 or legacy <= 113:
+ return _extendLine(s, line, word, line_width, next_line_prefix, legacy)
+
+ max_word_length = max(len(word) for word in words)
+ if (len(line) + max_word_length > line_width and
+ len(line) > len(next_line_prefix)):
+ s += line.rstrip() + '\n'
+ line = next_line_prefix + words[0]
+ indent = next_line_prefix
+ else:
+ indent = len(line) * ' '
+ line += words[0]
+
+ for word in words[1::]:
+ s += line.rstrip() + '\n'
+ line = indent + word
+
+ suffix_length = max_word_length - len(words[-1])
+ line += suffix_length * ' '
+
+ return s, line
+
+def _formatArray(a, format_function, line_width, next_line_prefix,
+ separator, edge_items, summary_insert, legacy):
+ """formatArray is designed for two modes of operation:
+
+ 1. Full output
+
+ 2. Summarized output
+
+ """
+ def recurser(index, hanging_indent, curr_width):
+ """
+ By using this local function, we don't need to recurse with all the
+ arguments. Since this function is not created recursively, the cost is
+ not significant
+ """
+ axis = len(index)
+ axes_left = a.ndim - axis
+
+ if axes_left == 0:
+ return format_function(a[index])
+
+ # when recursing, add a space to align with the [ added, and reduce the
+ # length of the line by 1
+ next_hanging_indent = hanging_indent + ' '
+ if legacy <= 113:
+ next_width = curr_width
+ else:
+ next_width = curr_width - len(']')
+
+ a_len = a.shape[axis]
+ show_summary = summary_insert and 2 * edge_items < a_len
+ if show_summary:
+ leading_items = edge_items
+ trailing_items = edge_items
+ else:
+ leading_items = 0
+ trailing_items = a_len
+
+ # stringify the array with the hanging indent on the first line too
+ s = ''
+
+ # last axis (rows) - wrap elements if they would not fit on one line
+ if axes_left == 1:
+ # the length up until the beginning of the separator / bracket
+ if legacy <= 113:
+ elem_width = curr_width - len(separator.rstrip())
+ else:
+ elem_width = curr_width - max(
+ len(separator.rstrip()), len(']')
+ )
+
+ line = hanging_indent
+ for i in range(leading_items):
+ word = recurser(index + (i,), next_hanging_indent, next_width)
+ s, line = _extendLine_pretty(
+ s, line, word, elem_width, hanging_indent, legacy)
+ line += separator
+
+ if show_summary:
+ s, line = _extendLine(
+ s, line, summary_insert, elem_width, hanging_indent, legacy
+ )
+ if legacy <= 113:
+ line += ", "
+ else:
+ line += separator
+
+ for i in range(trailing_items, 1, -1):
+ word = recurser(index + (-i,), next_hanging_indent, next_width)
+ s, line = _extendLine_pretty(
+ s, line, word, elem_width, hanging_indent, legacy)
+ line += separator
+
+ if legacy <= 113:
+ # width of the separator is not considered on 1.13
+ elem_width = curr_width
+ word = recurser(index + (-1,), next_hanging_indent, next_width)
+ s, line = _extendLine_pretty(
+ s, line, word, elem_width, hanging_indent, legacy)
+
+ s += line
+
+ # other axes - insert newlines between rows
+ else:
+ s = ''
+ line_sep = separator.rstrip() + '\n' * (axes_left - 1)
+
+ for i in range(leading_items):
+ nested = recurser(
+ index + (i,), next_hanging_indent, next_width
+ )
+ s += hanging_indent + nested + line_sep
+
+ if show_summary:
+ if legacy <= 113:
+ # trailing space, fixed nbr of newlines,
+ # and fixed separator
+ s += hanging_indent + summary_insert + ", \n"
+ else:
+ s += hanging_indent + summary_insert + line_sep
+
+ for i in range(trailing_items, 1, -1):
+ nested = recurser(index + (-i,), next_hanging_indent,
+ next_width)
+ s += hanging_indent + nested + line_sep
+
+ nested = recurser(index + (-1,), next_hanging_indent, next_width)
+ s += hanging_indent + nested
+
+ # remove the hanging indent, and wrap in []
+ s = '[' + s[len(hanging_indent):] + ']'
+ return s
+
+ try:
+ # invoke the recursive part with an initial index and prefix
+ return recurser(index=(),
+ hanging_indent=next_line_prefix,
+ curr_width=line_width)
+ finally:
+ # recursive closures have a cyclic reference to themselves, which
+ # requires gc to collect (gh-10620). To avoid this problem, for
+ # performance and PyPy friendliness, we break the cycle:
+ recurser = None
+
+def _none_or_positive_arg(x, name):
+ if x is None:
+ return -1
+ if x < 0:
+ raise ValueError(f"{name} must be >= 0")
+ return x
+
+class FloatingFormat:
+ """ Formatter for subtypes of np.floating """
+ def __init__(self, data, precision, floatmode, suppress_small, sign=False,
+ *, legacy=None):
+ # for backcompatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
+ self._legacy = legacy
+ if self._legacy <= 113:
+ # when not 0d, legacy does not support '-'
+ if data.shape != () and sign == '-':
+ sign = ' '
+
+ self.floatmode = floatmode
+ if floatmode == 'unique':
+ self.precision = None
+ else:
+ self.precision = precision
+
+ self.precision = _none_or_positive_arg(self.precision, 'precision')
+
+ self.suppress_small = suppress_small
+ self.sign = sign
+ self.exp_format = False
+ self.large_exponent = False
+ self.fillFormat(data)
+
+ def fillFormat(self, data):
+ # only the finite values are used to compute the number of digits
+ finite_vals = data[isfinite(data)]
+
+ # choose exponential mode based on the non-zero finite values:
+ abs_non_zero = absolute(finite_vals[finite_vals != 0])
+ if len(abs_non_zero) != 0:
+ max_val = np.max(abs_non_zero)
+ min_val = np.min(abs_non_zero)
+ if self._legacy <= 202:
+ exp_cutoff_max = 1.e8
+ else:
+ # consider data type while deciding the max cutoff for exp format
+ exp_cutoff_max = 10.**min(8, np.finfo(data.dtype).precision)
+ with errstate(over='ignore'): # division can overflow
+ if max_val >= exp_cutoff_max or (not self.suppress_small and
+ (min_val < 0.0001 or max_val / min_val > 1000.)):
+ self.exp_format = True
+
+ # do a first pass of printing all the numbers, to determine sizes
+ if len(finite_vals) == 0:
+ self.pad_left = 0
+ self.pad_right = 0
+ self.trim = '.'
+ self.exp_size = -1
+ self.unique = True
+ self.min_digits = None
+ elif self.exp_format:
+ trim, unique = '.', True
+ if self.floatmode == 'fixed' or self._legacy <= 113:
+ trim, unique = 'k', False
+ strs = (dragon4_scientific(x, precision=self.precision,
+ unique=unique, trim=trim, sign=self.sign == '+')
+ for x in finite_vals)
+ frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
+ int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
+ self.exp_size = max(len(s) for s in exp_strs) - 1
+
+ self.trim = 'k'
+ self.precision = max(len(s) for s in frac_part)
+ self.min_digits = self.precision
+ self.unique = unique
+
+ # for back-compat with np 1.13, use 2 spaces & sign and full prec
+ if self._legacy <= 113:
+ self.pad_left = 3
+ else:
+ # this should be only 1 or 2. Can be calculated from sign.
+ self.pad_left = max(len(s) for s in int_part)
+ # pad_right is only needed for nan length calculation
+ self.pad_right = self.exp_size + 2 + self.precision
+ else:
+ trim, unique = '.', True
+ if self.floatmode == 'fixed':
+ trim, unique = 'k', False
+ strs = (dragon4_positional(x, precision=self.precision,
+ fractional=True,
+ unique=unique, trim=trim,
+ sign=self.sign == '+')
+ for x in finite_vals)
+ int_part, frac_part = zip(*(s.split('.') for s in strs))
+ if self._legacy <= 113:
+ self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
+ else:
+ self.pad_left = max(len(s) for s in int_part)
+ self.pad_right = max(len(s) for s in frac_part)
+ self.exp_size = -1
+ self.unique = unique
+
+ if self.floatmode in ['fixed', 'maxprec_equal']:
+ self.precision = self.min_digits = self.pad_right
+ self.trim = 'k'
+ else:
+ self.trim = '.'
+ self.min_digits = 0
+
+ if self._legacy > 113:
+ # account for sign = ' ' by adding one to pad_left
+ if self.sign == ' ' and not any(np.signbit(finite_vals)):
+ self.pad_left += 1
+
+ # if there are non-finite values, may need to increase pad_left
+ if data.size != finite_vals.size:
+ neginf = self.sign != '-' or any(data[isinf(data)] < 0)
+ offset = self.pad_right + 1 # +1 for decimal pt
+ current_options = format_options.get()
+ self.pad_left = max(
+ self.pad_left, len(current_options['nanstr']) - offset,
+ len(current_options['infstr']) + neginf - offset
+ )
+
+ def __call__(self, x):
+ if not np.isfinite(x):
+ with errstate(invalid='ignore'):
+ current_options = format_options.get()
+ if np.isnan(x):
+ sign = '+' if self.sign == '+' else ''
+ ret = sign + current_options['nanstr']
+ else: # isinf
+ sign = '-' if x < 0 else '+' if self.sign == '+' else ''
+ ret = sign + current_options['infstr']
+ return ' ' * (
+ self.pad_left + self.pad_right + 1 - len(ret)
+ ) + ret
+
+ if self.exp_format:
+ return dragon4_scientific(x,
+ precision=self.precision,
+ min_digits=self.min_digits,
+ unique=self.unique,
+ trim=self.trim,
+ sign=self.sign == '+',
+ pad_left=self.pad_left,
+ exp_digits=self.exp_size)
+ else:
+ return dragon4_positional(x,
+ precision=self.precision,
+ min_digits=self.min_digits,
+ unique=self.unique,
+ fractional=True,
+ trim=self.trim,
+ sign=self.sign == '+',
+ pad_left=self.pad_left,
+ pad_right=self.pad_right)
+
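+# Worked example (illustrative): non-finite values are right-aligned into the
+# same field as the finite ones, e.g. np.array2string(np.array([1.5, np.nan]))
+# gives '[1.5 nan]', with 'nan' padded to the shared column width computed in
+# fillFormat above.
+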
+
+@set_module('numpy')
+def format_float_scientific(x, precision=None, unique=True, trim='k',
+ sign=False, pad_left=None, exp_digits=None,
+ min_digits=None):
+ """
+ Format a floating-point scalar as a decimal string in scientific notation.
+
+ Provides control over rounding, trimming and padding. Uses and assumes
+ IEEE unbiased rounding. Uses the "Dragon4" algorithm.
+
+ Parameters
+ ----------
+ x : python float or numpy floating scalar
+ Value to format.
+ precision : non-negative integer or None, optional
+ Maximum number of digits to print. May be None if `unique` is
+ `True`, but must be an integer if unique is `False`.
+ unique : boolean, optional
+ If `True`, use a digit-generation strategy which gives the shortest
+ representation which uniquely identifies the floating-point number from
+ other values of the same type, by judicious rounding. If `precision`
+ is given, fewer digits than necessary can be printed; if `min_digits`
+ is given, more can be printed, in which case the last digit is rounded
+ with unbiased rounding.
+ If `False`, digits are generated as if printing an infinite-precision
+ value and stopping after `precision` digits, rounding the remaining
+ value with unbiased rounding.
+ trim : one of 'k', '.', '0', '-', optional
+ Controls post-processing trimming of trailing digits, as follows:
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
+ sign : boolean, optional
+ Whether to show the sign for positive values.
+ pad_left : non-negative integer, optional
+ Pad the left side of the string with whitespace until at least that
+ many characters are to the left of the decimal point.
+ exp_digits : non-negative integer, optional
+ Pad the exponent with zeros until it contains at least this
+ many digits. If omitted, the exponent will be at least 2 digits.
+ min_digits : non-negative integer or None, optional
+ Minimum number of digits to print. Only has an effect if `unique=True`,
+ in which case digits beyond those necessary to uniquely identify the
+ value may be printed, with the last digit rounded unbiased.
+
+ .. versionadded:: 1.21.0
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_positional
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.format_float_scientific(np.float32(np.pi))
+ '3.1415927e+00'
+ >>> s = np.float32(1.23e24)
+ >>> np.format_float_scientific(s, unique=False, precision=15)
+ '1.230000071797338e+24'
+ >>> np.format_float_scientific(s, exp_digits=4)
+ '1.23e+0024'
+ """
+ precision = _none_or_positive_arg(precision, 'precision')
+ pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+ exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
+ min_digits = _none_or_positive_arg(min_digits, 'min_digits')
+ if min_digits > 0 and precision > 0 and min_digits > precision:
+ raise ValueError("min_digits must be less than or equal to precision")
+ return dragon4_scientific(x, precision=precision, unique=unique,
+ trim=trim, sign=sign, pad_left=pad_left,
+ exp_digits=exp_digits, min_digits=min_digits)
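+
+# Illustrative doctest (assumed output, derived from the float32 bit pattern
+# of pi): min_digits extends the shortest unique repr with further true
+# digits of the underlying binary value.
+#
+# >>> np.format_float_scientific(np.float32(np.pi), min_digits=10)
+# '3.1415927410e+00'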
+
+
+@set_module('numpy')
+def format_float_positional(x, precision=None, unique=True,
+ fractional=True, trim='k', sign=False,
+ pad_left=None, pad_right=None, min_digits=None):
+ """
+ Format a floating-point scalar as a decimal string in positional notation.
+
+ Provides control over rounding, trimming and padding. Uses and assumes
+ IEEE unbiased rounding. Uses the "Dragon4" algorithm.
+
+ Parameters
+ ----------
+ x : python float or numpy floating scalar
+ Value to format.
+ precision : non-negative integer or None, optional
+ Maximum number of digits to print. May be None if `unique` is
+ `True`, but must be an integer if unique is `False`.
+ unique : boolean, optional
+ If `True`, use a digit-generation strategy which gives the shortest
+ representation which uniquely identifies the floating-point number from
+ other values of the same type, by judicious rounding. If `precision`
+ is given, fewer digits than necessary can be printed; if `min_digits`
+ is given, more can be printed, in which case the last digit is rounded
+ with unbiased rounding.
+ If `False`, digits are generated as if printing an infinite-precision
+ value and stopping after `precision` digits, rounding the remaining
+ value with unbiased rounding.
+ fractional : boolean, optional
+ If `True`, the cutoffs of `precision` and `min_digits` refer to the
+ total number of digits after the decimal point, including leading
+ zeros.
+ If `False`, `precision` and `min_digits` refer to the total number of
+ significant digits, before or after the decimal point, ignoring leading
+ zeros.
+ trim : one of 'k', '.', '0', '-', optional
+ Controls post-processing trimming of trailing digits, as follows:
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
+ sign : boolean, optional
+ Whether to show the sign for positive values.
+ pad_left : non-negative integer, optional
+ Pad the left side of the string with whitespace until at least that
+ many characters are to the left of the decimal point.
+ pad_right : non-negative integer, optional
+ Pad the right side of the string with whitespace until at least that
+ many characters are to the right of the decimal point.
+ min_digits : non-negative integer or None, optional
+ Minimum number of digits to print. Only has an effect if `unique=True`
+ in which case additional digits past those necessary to uniquely
+ identify the value may be printed, rounding the last additional digit.
+
+ .. versionadded:: 1.21.0
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_scientific
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.format_float_positional(np.float32(np.pi))
+ '3.1415927'
+ >>> np.format_float_positional(np.float16(np.pi))
+ '3.14'
+ >>> np.format_float_positional(np.float16(0.3))
+ '0.3'
+ >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
+ '0.3000488281'
+ """
+ precision = _none_or_positive_arg(precision, 'precision')
+ pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+ pad_right = _none_or_positive_arg(pad_right, 'pad_right')
+ min_digits = _none_or_positive_arg(min_digits, 'min_digits')
+ if not fractional and precision == 0:
+ raise ValueError("precision must be greater than 0 if "
+ "fractional=False")
+ if min_digits > 0 and precision > 0 and min_digits > precision:
+ raise ValueError("min_digits must be less than or equal to precision")
+ return dragon4_positional(x, precision=precision, unique=unique,
+ fractional=fractional, trim=trim,
+ sign=sign, pad_left=pad_left,
+ pad_right=pad_right, min_digits=min_digits)
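+
+# Illustrative doctest (assumed outputs, following the documented `trim`
+# semantics): the four trim modes applied to one fixed-precision rendering.
+#
+# >>> np.format_float_positional(1.0, precision=4, unique=False, trim='k')
+# '1.0000'
+# >>> np.format_float_positional(1.0, precision=4, unique=False, trim='.')
+# '1.'
+# >>> np.format_float_positional(1.0, precision=4, unique=False, trim='0')
+# '1.0'
+# >>> np.format_float_positional(1.0, precision=4, unique=False, trim='-')
+# '1'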
+
+class IntegerFormat:
+ def __init__(self, data, sign='-'):
+ if data.size > 0:
+ data_max = np.max(data)
+ data_min = np.min(data)
+ data_max_str_len = len(str(data_max))
+ if sign == ' ' and data_min < 0:
+ sign = '-'
+ if data_max >= 0 and sign in "+ ":
+ data_max_str_len += 1
+ max_str_len = max(data_max_str_len,
+ len(str(data_min)))
+ else:
+ max_str_len = 0
+ self.format = f'{{:{sign}{max_str_len}d}}'
+
+ def __call__(self, x):
+ return self.format.format(x)
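+
+# Worked example (illustrative): for data = np.array([-10, 5]) with sign='-',
+# len(str(5)) == 1 while len(str(-10)) == 3, so max_str_len == 3 and
+# self.format == '{:-3d}'; formatting then yields '-10' and '  5', which
+# align in a column.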
+
+class BoolFormat:
+ def __init__(self, data, **kwargs):
+ # add an extra space so " True" and "False" have the same length and
+ # array elements align nicely when printed, except in 0d arrays
+ self.truestr = ' True' if data.shape != () else 'True'
+
+ def __call__(self, x):
+ return self.truestr if x else "False"
+
+
+class ComplexFloatingFormat:
+ """ Formatter for subtypes of np.complexfloating """
+ def __init__(self, x, precision, floatmode, suppress_small,
+ sign=False, *, legacy=None):
+ # for backward compatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
+ floatmode_real = floatmode_imag = floatmode
+ if legacy <= 113:
+ floatmode_real = 'maxprec_equal'
+ floatmode_imag = 'maxprec'
+
+ self.real_format = FloatingFormat(
+ x.real, precision, floatmode_real, suppress_small,
+ sign=sign, legacy=legacy
+ )
+ self.imag_format = FloatingFormat(
+ x.imag, precision, floatmode_imag, suppress_small,
+ sign='+', legacy=legacy
+ )
+
+ def __call__(self, x):
+ r = self.real_format(x.real)
+ i = self.imag_format(x.imag)
+
+ # add the 'j' before the terminal whitespace in i
+ sp = len(i.rstrip())
+ i = i[:sp] + 'j' + i[sp:]
+
+ return r + i
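+
+# Worked example (illustrative): if the imaginary part formats to '+2.  '
+# (two pad spaces), rstrip() has length 3, so the 'j' is spliced in before
+# the padding: '+2.' + 'j' + '  ' -> '+2.j  ', keeping columns aligned.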
+
+
+class _TimelikeFormat:
+ def __init__(self, data):
+ non_nat = data[~isnat(data)]
+ if len(non_nat) > 0:
+ # Max str length of non-NaT elements
+ max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
+ len(self._format_non_nat(np.min(non_nat))))
+ else:
+ max_str_len = 0
+ if len(non_nat) < data.size:
+ # data contains a NaT
+ max_str_len = max(max_str_len, 5)
+ self._format = f'%{max_str_len}s'
+ self._nat = "'NaT'".rjust(max_str_len)
+
+ def _format_non_nat(self, x):
+ # override in subclass
+ raise NotImplementedError
+
+ def __call__(self, x):
+ if isnat(x):
+ return self._nat
+ else:
+ return self._format % self._format_non_nat(x)
+
+
+class DatetimeFormat(_TimelikeFormat):
+ def __init__(self, x, unit=None, timezone=None, casting='same_kind',
+ legacy=False):
+ # Get the unit from the dtype
+ if unit is None:
+ if x.dtype.kind == 'M':
+ unit = datetime_data(x.dtype)[0]
+ else:
+ unit = 's'
+
+ if timezone is None:
+ timezone = 'naive'
+ self.timezone = timezone
+ self.unit = unit
+ self.casting = casting
+ self.legacy = legacy
+
+ # must be called after the above are configured
+ super().__init__(x)
+
+ def __call__(self, x):
+ if self.legacy <= 113:
+ return self._format_non_nat(x)
+ return super().__call__(x)
+
+ def _format_non_nat(self, x):
+ return "'%s'" % datetime_as_string(x,
+ unit=self.unit,
+ timezone=self.timezone,
+ casting=self.casting)
+
+
+class TimedeltaFormat(_TimelikeFormat):
+ def _format_non_nat(self, x):
+ return str(x.astype('i8'))
+
+
+class SubArrayFormat:
+ def __init__(self, format_function, **options):
+ self.format_function = format_function
+ self.threshold = options['threshold']
+ self.edge_items = options['edgeitems']
+
+ def __call__(self, a):
+ self.summary_insert = "..." if a.size > self.threshold else ""
+ return self.format_array(a)
+
+ def format_array(self, a):
+ if np.ndim(a) == 0:
+ return self.format_function(a)
+
+ if self.summary_insert and a.shape[0] > 2 * self.edge_items:
+ formatted = (
+ [self.format_array(a_) for a_ in a[:self.edge_items]]
+ + [self.summary_insert]
+ + [self.format_array(a_) for a_ in a[-self.edge_items:]]
+ )
+ else:
+ formatted = [self.format_array(a_) for a_ in a]
+
+ return "[" + ", ".join(formatted) + "]"
+
+
+class StructuredVoidFormat:
+ """
+ Formatter for structured np.void objects.
+
+ This does not work on structured alias types like
+ np.dtype(('i4', 'i2,i2')), as alias scalars lose their field information,
+ and the implementation relies upon np.void.__getitem__.
+ """
+ def __init__(self, format_functions):
+ self.format_functions = format_functions
+
+ @classmethod
+ def from_data(cls, data, **options):
+ """
+ This is a second way to initialize StructuredVoidFormat,
+ using the raw data as input. Added to avoid changing
+ the signature of __init__.
+ """
+ format_functions = []
+ for field_name in data.dtype.names:
+ format_function = _get_format_function(data[field_name], **options)
+ if data.dtype[field_name].shape != ():
+ format_function = SubArrayFormat(format_function, **options)
+ format_functions.append(format_function)
+ return cls(format_functions)
+
+ def __call__(self, x):
+ str_fields = [
+ format_function(field)
+ for field, format_function in zip(x, self.format_functions)
+ ]
+ if len(str_fields) == 1:
+ return f"({str_fields[0]},)"
+ else:
+ return f"({', '.join(str_fields)})"
+
+
+def _void_scalar_to_string(x, is_repr=True):
+ """
+ Implements the repr for structured-void scalars. It is called from the
+ scalartypes.c.src code, and is placed here because it uses the elementwise
+ formatters defined above.
+ """
+ options = format_options.get().copy()
+
+ if options["legacy"] <= 125:
+ return StructuredVoidFormat.from_data(array(x), **options)(x)
+
+ if options.get('formatter') is None:
+ options['formatter'] = {}
+ options['formatter'].setdefault('float_kind', str)
+ val_repr = StructuredVoidFormat.from_data(array(x), **options)(x)
+ if not is_repr:
+ return val_repr
+ cls = type(x)
+ cls_fqn = cls.__module__.replace("numpy", "np") + "." + cls.__name__
+ void_dtype = np.dtype((np.void, x.dtype))
+ return f"{cls_fqn}({val_repr}, dtype={void_dtype!s})"
+
+
+_typelessdata = [int_, float64, complex128, _nt.bool]
+
+
+def dtype_is_implied(dtype):
+ """
+ Determine if the given dtype is implied by the representation
+ of its values.
+
+ Parameters
+ ----------
+ dtype : dtype
+ Data type
+
+ Returns
+ -------
+ implied : bool
+ True if the dtype is implied by the representation of its values.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np._core.arrayprint.dtype_is_implied(int)
+ True
+ >>> np.array([1, 2, 3], int)
+ array([1, 2, 3])
+ >>> np._core.arrayprint.dtype_is_implied(np.int8)
+ False
+ >>> np.array([1, 2, 3], np.int8)
+ array([1, 2, 3], dtype=int8)
+ """
+ dtype = np.dtype(dtype)
+ if format_options.get()['legacy'] <= 113 and dtype.type == np.bool:
+ return False
+
+ # not just void types can be structured, and names are not part of the repr
+ if dtype.names is not None:
+ return False
+
+ # should care about endianness *unless size is 1* (e.g., int8, bool)
+ if not dtype.isnative:
+ return False
+
+ return dtype.type in _typelessdata
+
+
+def dtype_short_repr(dtype):
+ """
+ Convert a dtype to a short form which evaluates to the same dtype.
+
+ The intent is roughly that the following holds
+
+ >>> from numpy import *
+ >>> dt = np.int64([1, 2]).dtype
+ >>> assert eval(dtype_short_repr(dt)) == dt
+ """
+ if type(dtype).__repr__ != np.dtype.__repr__:
+ # TODO: Custom repr for user DTypes, logic should likely move.
+ return repr(dtype)
+ if dtype.names is not None:
+ # structured dtypes give a list or tuple repr
+ return str(dtype)
+ elif issubclass(dtype.type, flexible):
+ # handle these separately so they don't give garbage like str256
+ return f"'{str(dtype)}'"
+
+ typename = dtype.name
+ if not dtype.isnative:
+ # deal with cases like dtype('<u2') that are identical to an
+ # established dtype (in this case uint16)
+ # except that they have a different endianness.
+ return f"'{str(dtype)}'"
+ return typename
+
+
+def _array_repr_implementation(
+ arr, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_repr() that allows overriding array2string."""
+ current_options = format_options.get()
+ override_repr = current_options["override_repr"]
+ if override_repr is not None:
+ return override_repr(arr)
+
+ if max_line_width is None:
+ max_line_width = current_options['linewidth']
+
+ if type(arr) is not ndarray:
+ class_name = type(arr).__name__
+ else:
+ class_name = "array"
+
+ prefix = class_name + "("
+ if (current_options['legacy'] <= 113 and
+ arr.shape == () and not arr.dtype.names):
+ lst = repr(arr.item())
+ elif arr.size > 0 or arr.shape == (0,):
+ lst = array2string(arr, max_line_width, precision, suppress_small,
+ ', ', prefix, suffix=")")
+ else: # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(arr.shape),)
+
+ extras = []
+ if (arr.size == 0 and arr.shape != (0,)
+ or (current_options['legacy'] > 210
+ and arr.size > current_options['threshold'])):
+ extras.append(f"shape={arr.shape}")
+ if not dtype_is_implied(arr.dtype) or arr.size == 0:
+ extras.append(f"dtype={dtype_short_repr(arr.dtype)}")
+
+ if not extras:
+ return prefix + lst + ")"
+
+ arr_str = prefix + lst + ","
+ extra_str = ", ".join(extras) + ")"
+ # compute whether we should put extras on a new line: Do so if adding the
+ # extras would extend the last line past max_line_width.
+ # Note: This line gives the correct result even when rfind returns -1.
+ last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
+ spacer = " "
+ if current_options['legacy'] <= 113:
+ if issubclass(arr.dtype.type, flexible):
+ spacer = '\n' + ' ' * len(prefix)
+ elif last_line_len + len(extra_str) + 1 > max_line_width:
+ spacer = '\n' + ' ' * len(prefix)
+
+ return arr_str + spacer + extra_str
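+
+# Worked example (illustrative): if the last line of the array body is 70
+# characters and linewidth is 75, a 12-character 'dtype=...)' suffix would
+# overflow it, so spacer becomes '\n' plus len(prefix) spaces and the extras
+# move to their own indented line.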
+
+
+def _array_repr_dispatcher(
+ arr, max_line_width=None, precision=None, suppress_small=None):
+ return (arr,)
+
+
+@array_function_dispatch(_array_repr_dispatcher, module='numpy')
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return the string representation of an array.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
+ precision : int, optional
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
+
+ Returns
+ -------
+ string : str
+ The string representation of an array.
+
+ See Also
+ --------
+ array_str, array2string, set_printoptions
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.array_repr(np.array([1,2]))
+ 'array([1, 2])'
+ >>> np.array_repr(np.ma.array([0.]))
+ 'MaskedArray([0.])'
+ >>> np.array_repr(np.array([], np.int32))
+ 'array([], dtype=int32)'
+
+ >>> x = np.array([1e-6, 4e-7, 2, 3])
+ >>> np.array_repr(x, precision=6, suppress_small=True)
+ 'array([0.000001, 0. , 2. , 3. ])'
+
+ """
+ return _array_repr_implementation(
+ arr, max_line_width, precision, suppress_small)
+
+
+@_recursive_guard()
+def _guarded_repr_or_str(v):
+ if isinstance(v, bytes):
+ return repr(v)
+ return str(v)
+
+
+def _array_str_implementation(
+ a, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_str() that allows overriding array2string."""
+ if (format_options.get()['legacy'] <= 113 and
+ a.shape == () and not a.dtype.names):
+ return str(a.item())
+
+ # the str of 0d arrays is a special case: It should appear like a scalar,
+ # so floats are not truncated by `precision`, and strings are not wrapped
+ # in quotes. So we return the str of the scalar value.
+ if a.shape == ():
+ # obtain a scalar and call str on it, avoiding problems for subclasses
+ # for which indexing with () returns a 0d instead of a scalar by using
+ # ndarray's getindex. Also guard against recursive 0d object arrays.
+ return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
+
+ return array2string(a, max_line_width, precision, suppress_small, ' ', "")
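+
+# Illustrative doctest (assumed output): 0d arrays stringify as their scalar,
+# so `precision` does not truncate them:
+#
+# >>> np.array_str(np.array(np.pi), precision=2)
+# '3.141592653589793'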
+
+
+def _array_str_dispatcher(
+ a, max_line_width=None, precision=None, suppress_small=None):
+ return (a,)
+
+
+@array_function_dispatch(_array_str_dispatcher, module='numpy')
+def array_str(a, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return a string representation of the data in an array.
+
+ The data in the array is returned as a single string. This function is
+ similar to `array_repr`, the difference being that `array_repr` also
+ returns information on the kind of array and its data type.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
+ precision : int, optional
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
+
+ See Also
+ --------
+ array2string, array_repr, set_printoptions
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.array_str(np.arange(3))
+ '[0 1 2]'
+
+ """
+ return _array_str_implementation(
+ a, max_line_width, precision, suppress_small)
+
+
+# needed if __array_function__ is disabled
+_array2string_impl = getattr(array2string, '__wrapped__', array2string)
+_default_array_str = functools.partial(_array_str_implementation,
+ array2string=_array2string_impl)
+_default_array_repr = functools.partial(_array_repr_implementation,
+ array2string=_array2string_impl)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.pyi
new file mode 100644
index 00000000..fec03a6f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/arrayprint.pyi
@@ -0,0 +1,238 @@
+from collections.abc import Callable
+
+# Using a private class is by no means ideal, but it is simply a consequence
+# of a `contextlib.contextmanager` returning an instance of the
+# aforementioned class
+from contextlib import _GeneratorContextManager
+from typing import (
+ Any,
+ Final,
+ Literal,
+ SupportsIndex,
+ TypeAlias,
+ TypedDict,
+ overload,
+ type_check_only,
+)
+
+from typing_extensions import deprecated
+
+import numpy as np
+from numpy._globals import _NoValueType
+from numpy._typing import NDArray, _CharLike_co, _FloatLike_co
+
+__all__ = [
+ "array2string",
+ "array_repr",
+ "array_str",
+ "format_float_positional",
+ "format_float_scientific",
+ "get_printoptions",
+ "printoptions",
+ "set_printoptions",
+]
+
+###
+
+_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
+_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False]
+_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle]
+_Sign: TypeAlias = Literal["-", "+", " "]
+_Trim: TypeAlias = Literal["k", ".", "0", "-"]
+_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str]
+
+@type_check_only
+class _FormatDict(TypedDict, total=False):
+ bool: Callable[[np.bool], str]
+ int: Callable[[np.integer], str]
+ timedelta: Callable[[np.timedelta64], str]
+ datetime: Callable[[np.datetime64], str]
+ float: Callable[[np.floating], str]
+ longfloat: Callable[[np.longdouble], str]
+ complexfloat: Callable[[np.complexfloating], str]
+ longcomplexfloat: Callable[[np.clongdouble], str]
+ void: Callable[[np.void], str]
+ numpystr: Callable[[_CharLike_co], str]
+ object: Callable[[object], str]
+ all: Callable[[object], str]
+ int_kind: Callable[[np.integer], str]
+ float_kind: Callable[[np.floating], str]
+ complex_kind: Callable[[np.complexfloating], str]
+ str_kind: Callable[[_CharLike_co], str]
+
+@type_check_only
+class _FormatOptions(TypedDict):
+ precision: int
+ threshold: int
+ edgeitems: int
+ linewidth: int
+ suppress: bool
+ nanstr: str
+ infstr: str
+ formatter: _FormatDict | None
+ sign: _Sign
+ floatmode: _FloatMode
+ legacy: _Legacy
+
+###
+
+__docformat__: Final = "restructuredtext" # undocumented
+
+def set_printoptions(
+ precision: SupportsIndex | None = ...,
+ threshold: int | None = ...,
+ edgeitems: int | None = ...,
+ linewidth: int | None = ...,
+ suppress: bool | None = ...,
+ nanstr: str | None = ...,
+ infstr: str | None = ...,
+ formatter: _FormatDict | None = ...,
+ sign: _Sign | None = None,
+ floatmode: _FloatMode | None = None,
+ *,
+ legacy: _Legacy | None = None,
+ override_repr: _ReprFunc | None = None,
+) -> None: ...
+def get_printoptions() -> _FormatOptions: ...
+
+# public numpy export
+@overload # no style
+def array2string(
+ a: NDArray[Any],
+ max_line_width: int | None = None,
+ precision: SupportsIndex | None = None,
+ suppress_small: bool | None = None,
+ separator: str = " ",
+ prefix: str = "",
+ style: _NoValueType = ...,
+ formatter: _FormatDict | None = None,
+ threshold: int | None = None,
+ edgeitems: int | None = None,
+ sign: _Sign | None = None,
+ floatmode: _FloatMode | None = None,
+ suffix: str = "",
+ *,
+ legacy: _Legacy | None = None,
+) -> str: ...
+@overload # style= (positional), legacy="1.13"
+def array2string(
+ a: NDArray[Any],
+ max_line_width: int | None,
+ precision: SupportsIndex | None,
+ suppress_small: bool | None,
+ separator: str,
+ prefix: str,
+ style: _ReprFunc,
+ formatter: _FormatDict | None = None,
+ threshold: int | None = None,
+ edgeitems: int | None = None,
+ sign: _Sign | None = None,
+ floatmode: _FloatMode | None = None,
+ suffix: str = "",
+ *,
+ legacy: Literal["1.13"],
+) -> str: ...
+@overload # style= (keyword), legacy="1.13"
+def array2string(
+ a: NDArray[Any],
+ max_line_width: int | None = None,
+ precision: SupportsIndex | None = None,
+ suppress_small: bool | None = None,
+ separator: str = " ",
+ prefix: str = "",
+ *,
+ style: _ReprFunc,
+ formatter: _FormatDict | None = None,
+ threshold: int | None = None,
+ edgeitems: int | None = None,
+ sign: _Sign | None = None,
+ floatmode: _FloatMode | None = None,
+ suffix: str = "",
+ legacy: Literal["1.13"],
+) -> str: ...
+@overload # style= (positional), legacy!="1.13"
+@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
+def array2string(
+ a: NDArray[Any],
+ max_line_width: int | None,
+ precision: SupportsIndex | None,
+ suppress_small: bool | None,
+ separator: str,
+ prefix: str,
+ style: _ReprFunc,
+ formatter: _FormatDict | None = None,
+ threshold: int | None = None,
+ edgeitems: int | None = None,
+ sign: _Sign | None = None,
+ floatmode: _FloatMode | None = None,
+ suffix: str = "",
+ *,
+ legacy: _LegacyNoStyle | None = None,
+) -> str: ...
+@overload # style= (keyword), legacy!="1.13"
+@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
+def array2string(
+ a: NDArray[Any],
+ max_line_width: int | None = None,
+ precision: SupportsIndex | None = None,
+ suppress_small: bool | None = None,
+ separator: str = " ",
+ prefix: str = "",
+ *,
+ style: _ReprFunc,
+ formatter: _FormatDict | None = None,
+ threshold: int | None = None,
+ edgeitems: int | None = None,
+ sign: _Sign | None = None,
+ floatmode: _FloatMode | None = None,
+ suffix: str = "",
+ legacy: _LegacyNoStyle | None = None,
+) -> str: ...
+
+def format_float_scientific(
+ x: _FloatLike_co,
+ precision: int | None = ...,
+ unique: bool = ...,
+ trim: _Trim = "k",
+ sign: bool = ...,
+ pad_left: int | None = ...,
+ exp_digits: int | None = ...,
+ min_digits: int | None = ...,
+) -> str: ...
+def format_float_positional(
+ x: _FloatLike_co,
+ precision: int | None = ...,
+ unique: bool = ...,
+ fractional: bool = ...,
+ trim: _Trim = "k",
+ sign: bool = ...,
+ pad_left: int | None = ...,
+ pad_right: int | None = ...,
+ min_digits: int | None = ...,
+) -> str: ...
+def array_repr(
+ arr: NDArray[Any],
+ max_line_width: int | None = ...,
+ precision: SupportsIndex | None = ...,
+ suppress_small: bool | None = ...,
+) -> str: ...
+def array_str(
+ a: NDArray[Any],
+ max_line_width: int | None = ...,
+ precision: SupportsIndex | None = ...,
+ suppress_small: bool | None = ...,
+) -> str: ...
+def printoptions(
+ precision: SupportsIndex | None = ...,
+ threshold: int | None = ...,
+ edgeitems: int | None = ...,
+ linewidth: int | None = ...,
+ suppress: bool | None = ...,
+ nanstr: str | None = ...,
+ infstr: str | None = ...,
+ formatter: _FormatDict | None = ...,
+ sign: _Sign | None = None,
+ floatmode: _FloatMode | None = None,
+ *,
+ legacy: _Legacy | None = None,
+ override_repr: _ReprFunc | None = None,
+) -> _GeneratorContextManager[_FormatOptions]: ...
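+
+# Illustrative usage (assumed output): `printoptions` is the context-manager
+# companion of `set_printoptions`; the previous options are restored on exit.
+#
+# >>> with np.printoptions(precision=2):
+# ...     print(np.array([np.pi]))
+# [3.14]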
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/cversions.py b/.venv/lib/python3.12/site-packages/numpy/_core/cversions.py
new file mode 100644
index 00000000..00159c3a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/cversions.py
@@ -0,0 +1,13 @@
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from os.path import dirname
+
+from code_generators.genapi import fullapi_hash
+from code_generators.numpy_api import full_api
+
+if __name__ == '__main__':
+ curdir = dirname(__file__)
+ print(fullapi_hash(full_api))
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.py b/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.py
new file mode 100644
index 00000000..bde8921f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.py
@@ -0,0 +1,1427 @@
+"""
+This module contains a set of functions for vectorized string
+operations and methods.
+
+.. note::
+ The `chararray` class exists for backwards compatibility with
+ Numarray; it is not recommended for new development. Starting from numpy
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
+ `dtype` `object_`, `bytes_` or `str_`, and use the free functions
+ in the `numpy.char` module for fast vectorized string operations.
+
+Some methods will only be available if the corresponding string method is
+available in your version of Python.
+
+The preferred alias for `defchararray` is `numpy.char`.
+
+"""
+import functools
+
+import numpy as np
+from numpy._core import overrides
+from numpy._core.multiarray import compare_chararrays
+from numpy._core.strings import (
+ _join as join,
+)
+from numpy._core.strings import (
+ _rsplit as rsplit,
+)
+from numpy._core.strings import (
+ _split as split,
+)
+from numpy._core.strings import (
+ _splitlines as splitlines,
+)
+from numpy._utils import set_module
+from numpy.strings import *
+from numpy.strings import (
+ multiply as strings_multiply,
+)
+from numpy.strings import (
+ partition as strings_partition,
+)
+from numpy.strings import (
+ rpartition as strings_rpartition,
+)
+
+from .numeric import array as narray
+from .numeric import asarray as asnarray
+from .numeric import ndarray
+from .numerictypes import bytes_, character, str_
+
+__all__ = [
+ 'equal', 'not_equal', 'greater_equal', 'less_equal',
+ 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
+ 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
+ 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
+ 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
+ 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
+ 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
+ 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
+ 'array', 'asarray', 'compare_chararrays', 'chararray'
+ ]
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.char')
+
+
+def _binary_op_dispatcher(x1, x2):
+ return (x1, x2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def equal(x1, x2):
+ """
+ Return (x1 == x2) element-wise.
+
+ Unlike `numpy.equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> y = "aa "
+ >>> x = "aa"
+ >>> np.char.equal(x, y)
+ array(True)
+
+ See Also
+ --------
+ not_equal, greater_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '==', True)
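+
+# Contrast sketch (assumed outputs): the plain ufunc does not strip trailing
+# whitespace first, unlike the numarray-compatible version above.
+#
+# >>> np.char.equal(np.array(['aa ']), 'aa')
+# array([ True])
+# >>> np.equal(np.array(['aa ']), np.array(['aa']))
+# array([False])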
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def not_equal(x1, x2):
+ """
+ Return (x1 != x2) element-wise.
+
+ Unlike `numpy.not_equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, greater_equal, less_equal, greater, less
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x1 = np.array(['a', 'b', 'c'])
+ >>> np.char.not_equal(x1, 'b')
+ array([ True, False, True])
+
+ """
+ return compare_chararrays(x1, x2, '!=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater_equal(x1, x2):
+ """
+ Return (x1 >= x2) element-wise.
+
+ Unlike `numpy.greater_equal`, this comparison is performed by
+ first stripping whitespace characters from the end of the string.
+ This behavior is provided for backward-compatibility with
+ numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, not_equal, less_equal, greater, less
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x1 = np.array(['a', 'b', 'c'])
+ >>> np.char.greater_equal(x1, 'b')
+ array([False, True, True])
+
+ """
+ return compare_chararrays(x1, x2, '>=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less_equal(x1, x2):
+ """
+ Return (x1 <= x2) element-wise.
+
+ Unlike `numpy.less_equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, greater, less
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x1 = np.array(['a', 'b', 'c'])
+ >>> np.char.less_equal(x1, 'b')
+ array([ True, True, False])
+
+ """
+ return compare_chararrays(x1, x2, '<=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater(x1, x2):
+ """
+ Return (x1 > x2) element-wise.
+
+ Unlike `numpy.greater`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, less_equal, less
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x1 = np.array(['a', 'b', 'c'])
+ >>> np.char.greater(x1, 'b')
+ array([False, False, True])
+
+ """
+ return compare_chararrays(x1, x2, '>', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less(x1, x2):
+ """
+ Return (x1 < x2) element-wise.
+
+ Unlike `numpy.less`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, less_equal, greater
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x1 = np.array(['a', 'b', 'c'])
+ >>> np.char.less(x1, 'b')
+ array([ True, False, False])
+
+ """
+ return compare_chararrays(x1, x2, '<', True)
+
+
+@set_module("numpy.char")
+def multiply(a, i):
+ """
+ Return (a * i), that is string multiple concatenation,
+ element-wise.
+
+ Values in ``i`` of less than 0 are treated as 0 (which yields an
+ empty string).
+
+ Parameters
+ ----------
+ a : array_like, with `np.bytes_` or `np.str_` dtype
+
+ i : array_like, with any integer dtype
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ Notes
+ -----
+ This is a thin wrapper around np.strings.multiply that raises
+ `ValueError` when ``i`` is not an integer. It only
+ exists for backwards-compatibility.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array(["a", "b", "c"])
+ >>> np.strings.multiply(a, 3)
+ array(['aaa', 'bbb', 'ccc'], dtype='<U3')
+ >>> i = np.array([1, 2, 3])
+ >>> np.strings.multiply(a, i)
+ array(['a', 'bb', 'ccc'], dtype='<U3')
+ >>> np.strings.multiply(np.array(['a']), i)
+ array(['a', 'aa', 'aaa'], dtype='<U3')
+ >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
+ >>> np.strings.multiply(a, 3)
+ array([['aaa', 'bbb', 'ccc'],
+ ['ddd', 'eee', 'fff']], dtype='<U3')
+ >>> np.strings.multiply(a, i)
+ array([['a', 'bb', 'ccc'],
+ ['d', 'ee', 'fff']], dtype='<U3')
+ """
+ try:
+ return strings_multiply(a, i)
+ except TypeError:
+ raise ValueError("Can only multiply by integers")
+
+
+@set_module("numpy.char")
+def partition(a, sep):
+ """
+ Partition each element in `a` around `sep`.
+
+ Calls :meth:`str.partition` element-wise. For each element in `a`,
+ split the element at the first occurrence of `sep`, and return three
+ strings containing the part before the separator, the separator
+ itself, and the part after the separator. If the separator is not
+ found, return the string itself followed by two empty strings.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array(["Numpy is nice!"])
+ >>> np.char.partition(x, " ")
+ array([['Numpy', ' ', 'is nice!']], dtype='<U8')
+ """
+ return np.stack(strings_partition(a, sep), axis=-1)
+
+
+@set_module("numpy.char")
+def rpartition(a, sep):
+ """
+ Partition (split) each element around the right-most separator.
+
+ Calls :meth:`str.rpartition` element-wise.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])
+ >>> np.char.rpartition(a, 'A')
+ array([['aAaAa', 'A', ''],
+ [' a', 'A', ' '],
+ ['abB', 'A', 'Bba']], dtype='<U5')
+ """
+ return np.stack(strings_rpartition(a, sep), axis=-1)
+
+
+@set_module("numpy.char")
+class chararray(ndarray):
+ """
+ chararray(shape, itemsize=1, unicode=False, buffer=None,
+ offset=0, strides=None, order=None)
+
+ Provides a convenient view on arrays of string and unicode values.
+
+ .. note::
+ The `chararray` class exists for backwards compatibility with
+ Numarray; it is not recommended for new development. Starting from numpy
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
+ `dtype` `object_`, `bytes_` or `str_`, and use the free functions
+ in the `numpy.char` module for fast vectorized string operations.
+
+ Versus a NumPy array of dtype `bytes_` or `str_`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+ (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
+ and infix operators (e.g. ``+, *, %``)
+
+ chararrays should be created using `numpy.char.array` or
+ `numpy.char.asarray` rather than via this constructor directly.
+
+ This constructor creates the array, using `buffer` (with `offset`
+ and `strides`) if it is not ``None``. If `buffer` is ``None``, then
+ constructs a new array with `strides` in "C order", unless both
+ ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
+ is in "Fortran order".
+
+ Methods
+ -------
+ astype
+ argsort
+ copy
+ count
+ decode
+ dump
+ dumps
+ encode
+ endswith
+ expandtabs
+ fill
+ find
+ flatten
+ getfield
+ index
+ isalnum
+ isalpha
+ isdecimal
+ isdigit
+ islower
+ isnumeric
+ isspace
+ istitle
+ isupper
+ item
+ join
+ ljust
+ lower
+ lstrip
+ nonzero
+ put
+ ravel
+ repeat
+ replace
+ reshape
+ resize
+ rfind
+ rindex
+ rjust
+ rsplit
+ rstrip
+ searchsorted
+ setfield
+ setflags
+ sort
+ split
+ splitlines
+ squeeze
+ startswith
+ strip
+ swapaxes
+ swapcase
+ take
+ title
+ tofile
+ tolist
+ tostring
+ translate
+ transpose
+ upper
+ view
+ zfill
+
+ Parameters
+ ----------
+ shape : tuple
+ Shape of the array.
+ itemsize : int, optional
+ Length of each array element, in number of characters. Default is 1.
+ unicode : bool, optional
+ Are the array elements of type unicode (True) or string (False).
+ Default is False.
+ buffer : object exposing the buffer interface or str, optional
+ Memory address of the start of the array data. Default is None,
+ in which case a new array is created.
+ offset : int, optional
+ Offset of the array data in `buffer`, in bytes.
+ Default is 0. Needs to be >= 0.
+ strides : array_like of ints, optional
+ Strides for the array (see `~numpy.ndarray.strides` for
+ full description). Default is None.
+ order : {'C', 'F'}, optional
+ The order in which the array data is stored in memory: 'C' ->
+ "row major" order (the default), 'F' -> "column major"
+ (Fortran) order.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> charar = np.char.chararray((3, 3))
+ >>> charar[:] = 'a'
+ >>> charar
+ chararray([[b'a', b'a', b'a'],
+ [b'a', b'a', b'a'],
+ [b'a', b'a', b'a']], dtype='|S1')
+
+ >>> charar = np.char.chararray(charar.shape, itemsize=5)
+ >>> charar[:] = 'abc'
+ >>> charar
+ chararray([[b'abc', b'abc', b'abc'],
+ [b'abc', b'abc', b'abc'],
+ [b'abc', b'abc', b'abc']], dtype='|S5')
+
+ """
+ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
+ offset=0, strides=None, order='C'):
+ if unicode:
+ dtype = str_
+ else:
+ dtype = bytes_
+
+ # force itemsize to be a Python int, since using NumPy integer
+ # types results in itemsize.itemsize being used as the size of
+ # strings in the new array.
+ itemsize = int(itemsize)
+
+ if isinstance(buffer, str):
+ # unicode objects do not have the buffer interface
+ filler = buffer
+ buffer = None
+ else:
+ filler = None
+
+ if buffer is None:
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+ order=order)
+ else:
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+ buffer=buffer,
+ offset=offset, strides=strides,
+ order=order)
+ if filler is not None:
+ self[...] = filler
+
+ return self
+
+ def __array_wrap__(self, arr, context=None, return_scalar=False):
+ # When calling a ufunc (and some other functions), we return a
+ # chararray if the ufunc output is a string-like array,
+ # or an ndarray otherwise
+ if arr.dtype.char in "SUbc":
+ return arr.view(type(self))
+ return arr
+
+ def __array_finalize__(self, obj):
+ # The b is a special case because it is used for reconstructing.
+ if self.dtype.char not in 'VSUbc':
+ raise ValueError("Can only create a chararray from string data.")
+
+ def __getitem__(self, obj):
+ val = ndarray.__getitem__(self, obj)
+ if isinstance(val, character):
+ return val.rstrip()
+ return val
+
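+ # Worked example (illustrative): because scalar results are rstripped
+ # here, np.char.array(['hello   '])[0] == 'hello'.
+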
+ # IMPLEMENTATION NOTE: Most of the methods of this class are
+ # direct delegations to the free functions in this module.
+ # However, those that return an array of strings should instead
+ # return a chararray, so some extra wrapping is required.
+
+ def __eq__(self, other):
+ """
+ Return (self == other) element-wise.
+
+ See Also
+ --------
+ equal
+ """
+ return equal(self, other)
+
+ def __ne__(self, other):
+ """
+ Return (self != other) element-wise.
+
+ See Also
+ --------
+ not_equal
+ """
+ return not_equal(self, other)
+
+ def __ge__(self, other):
+ """
+ Return (self >= other) element-wise.
+
+ See Also
+ --------
+ greater_equal
+ """
+ return greater_equal(self, other)
+
+ def __le__(self, other):
+ """
+ Return (self <= other) element-wise.
+
+ See Also
+ --------
+ less_equal
+ """
+ return less_equal(self, other)
+
+ def __gt__(self, other):
+ """
+ Return (self > other) element-wise.
+
+ See Also
+ --------
+ greater
+ """
+ return greater(self, other)
+
+ def __lt__(self, other):
+ """
+ Return (self < other) element-wise.
+
+ See Also
+ --------
+ less
+ """
+ return less(self, other)
+
+ def __add__(self, other):
+ """
+ Return (self + other), that is string concatenation,
+ element-wise for a pair of array_likes of str or unicode.
+
+ See Also
+ --------
+ add
+ """
+ return add(self, other)
+
+ def __radd__(self, other):
+ """
+ Return (other + self), that is string concatenation,
+ element-wise for a pair of array_likes of `bytes_` or `str_`.
+
+ See Also
+ --------
+ add
+ """
+ return add(other, self)
+
+ def __mul__(self, i):
+ """
+ Return (self * i), that is string multiple concatenation,
+ element-wise.
+
+ See Also
+ --------
+ multiply
+ """
+ return asarray(multiply(self, i))
+
+ def __rmul__(self, i):
+ """
+ Return (self * i), that is string multiple concatenation,
+ element-wise.
+
+ See Also
+ --------
+ multiply
+ """
+ return asarray(multiply(self, i))
+
+ def __mod__(self, i):
+ """
+ Return (self % i), that is pre-Python 2.6 string formatting
+ (interpolation), element-wise for a pair of array_likes of `bytes_`
+ or `str_`.
+
+ See Also
+ --------
+ mod
+ """
+ return asarray(mod(self, i))
+
+ def __rmod__(self, other):
+ return NotImplemented
+
+ def argsort(self, axis=-1, kind=None, order=None):
+ """
+ Return the indices that sort the array lexicographically.
+
+ For full documentation see `numpy.argsort`, for which this method is
+ in fact merely a "thin wrapper."
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
+ >>> c = c.view(np.char.chararray); c
+ chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
+ dtype='|S5')
+ >>> c[c.argsort()]
+ chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
+ dtype='|S5')
+
+ """
+ return self.__array__().argsort(axis, kind, order)
+ argsort.__doc__ = ndarray.argsort.__doc__
+
+ def capitalize(self):
+ """
+ Return a copy of `self` with only the first character of each element
+ capitalized.
+
+ See Also
+ --------
+ char.capitalize
+
+ """
+ return asarray(capitalize(self))
+
+ def center(self, width, fillchar=' '):
+ """
+ Return a copy of `self` with its elements centered in a
+ string of length `width`.
+
+ See Also
+ --------
+ center
+ """
+ return asarray(center(self, width, fillchar))
+
+ def count(self, sub, start=0, end=None):
+ """
+ Returns an array with the number of non-overlapping occurrences of
+ substring `sub` in the range [`start`, `end`].
+
+ See Also
+ --------
+ char.count
+
+ """
+ return count(self, sub, start, end)
+
+ def decode(self, encoding=None, errors=None):
+ """
+ Calls ``bytes.decode`` element-wise.
+
+ See Also
+ --------
+ char.decode
+
+ """
+ return decode(self, encoding, errors)
+
+ def encode(self, encoding=None, errors=None):
+ """
+ Calls :meth:`str.encode` element-wise.
+
+ See Also
+ --------
+ char.encode
+
+ """
+ return encode(self, encoding, errors)
+
+ def endswith(self, suffix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `self` ends with `suffix`, otherwise `False`.
+
+ See Also
+ --------
+ char.endswith
+
+ """
+ return endswith(self, suffix, start, end)
+
+ def expandtabs(self, tabsize=8):
+ """
+ Return a copy of each string element where all tab characters are
+ replaced by one or more spaces.
+
+ See Also
+ --------
+ char.expandtabs
+
+ """
+ return asarray(expandtabs(self, tabsize))
+
+ def find(self, sub, start=0, end=None):
+ """
+ For each element, return the lowest index in the string where
+ substring `sub` is found.
+
+ See Also
+ --------
+ char.find
+
+ """
+ return find(self, sub, start, end)
+
+ def index(self, sub, start=0, end=None):
+ """
+ Like `find`, but raises :exc:`ValueError` when the substring is not
+ found.
+
+ See Also
+ --------
+ char.index
+
+ """
+ return index(self, sub, start, end)
+
+ def isalnum(self):
+ """
+ Returns true for each element if all characters in the string
+ are alphanumeric and there is at least one character, false
+ otherwise.
+
+ See Also
+ --------
+ char.isalnum
+
+ """
+ return isalnum(self)
+
+ def isalpha(self):
+ """
+ Returns true for each element if all characters in the string
+ are alphabetic and there is at least one character, false
+ otherwise.
+
+ See Also
+ --------
+ char.isalpha
+
+ """
+ return isalpha(self)
+
+ def isdigit(self):
+ """
+ Returns true for each element if all characters in the string are
+ digits and there is at least one character, false otherwise.
+
+ See Also
+ --------
+ char.isdigit
+
+ """
+ return isdigit(self)
+
+ def islower(self):
+ """
+ Returns true for each element if all cased characters in the
+ string are lowercase and there is at least one cased character,
+ false otherwise.
+
+ See Also
+ --------
+ char.islower
+
+ """
+ return islower(self)
+
+ def isspace(self):
+ """
+ Returns true for each element if there are only whitespace
+ characters in the string and there is at least one character,
+ false otherwise.
+
+ See Also
+ --------
+ char.isspace
+
+ """
+ return isspace(self)
+
+ def istitle(self):
+ """
+ Returns true for each element if the element is a titlecased
+ string and there is at least one character, false otherwise.
+
+ See Also
+ --------
+ char.istitle
+
+ """
+ return istitle(self)
+
+ def isupper(self):
+ """
+ Returns true for each element if all cased characters in the
+ string are uppercase and there is at least one character, false
+ otherwise.
+
+ See Also
+ --------
+ char.isupper
+
+ """
+ return isupper(self)
+
+ def join(self, seq):
+ """
+ Return a string which is the concatenation of the strings in the
+ sequence `seq`.
+
+ See Also
+ --------
+ char.join
+
+ """
+ return join(self, seq)
+
+ def ljust(self, width, fillchar=' '):
+ """
+ Return an array with the elements of `self` left-justified in a
+ string of length `width`.
+
+ See Also
+ --------
+ char.ljust
+
+ """
+ return asarray(ljust(self, width, fillchar))
+
+ def lower(self):
+ """
+ Return an array with the elements of `self` converted to
+ lowercase.
+
+ See Also
+ --------
+ char.lower
+
+ """
+ return asarray(lower(self))
+
+ def lstrip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the leading characters
+ removed.
+
+ See Also
+ --------
+ char.lstrip
+
+ """
+ return lstrip(self, chars)
+
+ def partition(self, sep):
+ """
+ Partition each element in `self` around `sep`.
+
+ See Also
+ --------
+ partition
+ """
+ return asarray(partition(self, sep))
+
+ def replace(self, old, new, count=None):
+ """
+ For each element in `self`, return a copy of the string with all
+ occurrences of substring `old` replaced by `new`.
+
+ See Also
+ --------
+ char.replace
+
+ """
+ return replace(self, old, new, count if count is not None else -1)
+
+ def rfind(self, sub, start=0, end=None):
+ """
+ For each element in `self`, return the highest index in the string
+ where substring `sub` is found, such that `sub` is contained
+ within [`start`, `end`].
+
+ See Also
+ --------
+ char.rfind
+
+ """
+ return rfind(self, sub, start, end)
+
+ def rindex(self, sub, start=0, end=None):
+ """
+ Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is
+ not found.
+
+ See Also
+ --------
+ char.rindex
+
+ """
+ return rindex(self, sub, start, end)
+
+ def rjust(self, width, fillchar=' '):
+ """
+ Return an array with the elements of `self`
+ right-justified in a string of length `width`.
+
+ See Also
+ --------
+ char.rjust
+
+ """
+ return asarray(rjust(self, width, fillchar))
+
+ def rpartition(self, sep):
+ """
+ Partition each element in `self` around `sep`.
+
+ See Also
+ --------
+ rpartition
+ """
+ return asarray(rpartition(self, sep))
+
+ def rsplit(self, sep=None, maxsplit=None):
+ """
+ For each element in `self`, return a list of the words in
+ the string, using `sep` as the delimiter string.
+
+ See Also
+ --------
+ char.rsplit
+
+ """
+ return rsplit(self, sep, maxsplit)
+
+ def rstrip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the trailing
+ characters removed.
+
+ See Also
+ --------
+ char.rstrip
+
+ """
+ return rstrip(self, chars)
+
+ def split(self, sep=None, maxsplit=None):
+ """
+ For each element in `self`, return a list of the words in the
+ string, using `sep` as the delimiter string.
+
+ See Also
+ --------
+ char.split
+
+ """
+ return split(self, sep, maxsplit)
+
+ def splitlines(self, keepends=None):
+ """
+ For each element in `self`, return a list of the lines in the
+ element, breaking at line boundaries.
+
+ See Also
+ --------
+ char.splitlines
+
+ """
+ return splitlines(self, keepends)
+
+ def startswith(self, prefix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `self` starts with `prefix`, otherwise `False`.
+
+ See Also
+ --------
+ char.startswith
+
+ """
+ return startswith(self, prefix, start, end)
+
+ def strip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the leading and
+ trailing characters removed.
+
+ See Also
+ --------
+ char.strip
+
+ """
+ return strip(self, chars)
+
+ def swapcase(self):
+ """
+ For each element in `self`, return a copy of the string with
+ uppercase characters converted to lowercase and vice versa.
+
+ See Also
+ --------
+ char.swapcase
+
+ """
+ return asarray(swapcase(self))
+
+ def title(self):
+ """
+ For each element in `self`, return a titlecased version of the
+ string: words start with uppercase characters, all remaining cased
+ characters are lowercase.
+
+ See Also
+ --------
+ char.title
+
+ """
+ return asarray(title(self))
+
+ def translate(self, table, deletechars=None):
+ """
+ For each element in `self`, return a copy of the string where
+ all characters occurring in the optional argument
+ `deletechars` are removed, and the remaining characters have
+ been mapped through the given translation table.
+
+ See Also
+ --------
+ char.translate
+
+ """
+ return asarray(translate(self, table, deletechars))
+
+ def upper(self):
+ """
+ Return an array with the elements of `self` converted to
+ uppercase.
+
+ See Also
+ --------
+ char.upper
+
+ """
+ return asarray(upper(self))
+
+ def zfill(self, width):
+ """
+ Return the numeric string left-filled with zeros in a string of
+ length `width`.
+
+ See Also
+ --------
+ char.zfill
+
+ """
+ return asarray(zfill(self, width))
+
+ def isnumeric(self):
+ """
+ For each element in `self`, return True if there are only
+ numeric characters in the element.
+
+ See Also
+ --------
+ char.isnumeric
+
+ """
+ return isnumeric(self)
+
+ def isdecimal(self):
+ """
+ For each element in `self`, return True if there are only
+ decimal characters in the element.
+
+ See Also
+ --------
+ char.isdecimal
+
+ """
+ return isdecimal(self)
+
+
+@set_module("numpy.char")
+def array(obj, itemsize=None, copy=True, unicode=None, order=None):
+ """
+ Create a `~numpy.char.chararray`.
+
+ .. note::
+ This class is provided for numarray backward-compatibility.
+ New code (not concerned with numarray compatibility) should use
+ arrays of type `bytes_` or `str_` and use the free functions
+ in :mod:`numpy.char` for fast vectorized string operations instead.
+
+ Versus a NumPy array of dtype `bytes_` or `str_`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+ (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
+ and infix operators (e.g. ``+, *, %``)
+
+ Parameters
+ ----------
+ obj : array of str or unicode-like
+
+ itemsize : int, optional
+ `itemsize` is the number of characters per scalar in the
+ resulting array. If `itemsize` is None, and `obj` is an
+ object array or a Python list, the `itemsize` will be
+ automatically determined. If `itemsize` is provided and `obj`
+ is of type str or unicode, then the `obj` string will be
+ chunked into `itemsize` pieces.
+
+ copy : bool, optional
+ If true (default), then the object is copied. Otherwise, a copy
+ will only be made if ``__array__`` returns a copy, if obj is a
+ nested sequence, or if a copy is needed to satisfy any of the other
+ requirements (`itemsize`, unicode, `order`, etc.).
+
+ unicode : bool, optional
+ When true, the resulting `~numpy.char.chararray` can contain Unicode
+ characters, when false only 8-bit characters. If unicode is
+ None and `obj` is one of the following:
+
+ - a `~numpy.char.chararray`,
+ - an ndarray of type :class:`str_` or :class:`bytes_`
+ - a Python :class:`str` or :class:`bytes` object,
+
+ then the unicode setting of the output array will be
+ automatically determined.
+
+ order : {'C', 'F', 'A'}, optional
+ Specify the order of the array. If order is 'C' (default), then the
+ array will be in C-contiguous order (last-index varies the
+ fastest). If order is 'F', then the returned array
+ will be in Fortran-contiguous order (first-index varies the
+ fastest). If order is 'A', then the returned array may
+ be in any order (either C-, Fortran-contiguous, or even
+ discontiguous).
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> char_array = np.char.array(['hello', 'world', 'numpy','array'])
+ >>> char_array
+ chararray(['hello', 'world', 'numpy', 'array'], dtype='<U5')
+
+ """
+ ...
+
+
+@set_module("numpy.char")
+def asarray(obj, itemsize=None, unicode=None, order=None):
+ """
+ Convert the input to a `~numpy.char.chararray`, copying the data only if
+ necessary.
+
+ Versus a NumPy array of dtype `bytes_` or `str_`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+ (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
+ and infix operators (e.g. ``+``, ``*``, ``%``)
+
+ Parameters
+ ----------
+ obj : array of str or unicode-like
+
+ itemsize : int, optional
+ `itemsize` is the number of characters per scalar in the
+ resulting array. If `itemsize` is None, and `obj` is an
+ object array or a Python list, the `itemsize` will be
+ automatically determined. If `itemsize` is provided and `obj`
+ is of type str or unicode, then the `obj` string will be
+ chunked into `itemsize` pieces.
+
+ unicode : bool, optional
+ When true, the resulting `~numpy.char.chararray` can contain Unicode
+ characters, when false only 8-bit characters. If unicode is
+ None and `obj` is one of the following:
+
+ - a `~numpy.char.chararray`,
+ - an ndarray of type `str_` or `bytes_`,
+ - a Python `str` or `bytes` object,
+
+ then the unicode setting of the output array will be
+ automatically determined.
+
+ order : {'C', 'F'}, optional
+ Specify the order of the array. If order is 'C' (default), then the
+ array will be in C-contiguous order (last-index varies the
+ fastest). If order is 'F', then the returned array
+ will be in Fortran-contiguous order (first-index varies the
+ fastest).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.char.asarray(['hello', 'world'])
+ chararray(['hello', 'world'], dtype='<U5')
+
+ """
+ return array(obj, itemsize, copy=False, unicode=unicode, order=order)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.pyi
new file mode 100644
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/defchararray.pyi
+class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]):
+ @overload
+ def __new__(
+ subtype,
+ shape: _ShapeLike,
+ itemsize: SupportsIndex | SupportsInt = ...,
+ unicode: L[False] = ...,
+ buffer: _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: _ShapeLike = ...,
+ order: _OrderKACF = ...,
+ ) -> _CharArray[bytes_]: ...
+ @overload
+ def __new__(
+ subtype,
+ shape: _ShapeLike,
+ itemsize: SupportsIndex | SupportsInt = ...,
+ unicode: L[True] = ...,
+ buffer: _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: _ShapeLike = ...,
+ order: _OrderKACF = ...,
+ ) -> _CharArray[str_]: ...
+
+ def __array_finalize__(self, obj: object) -> None: ...
+ def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ...
+ def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ...
+ def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ...
+
+ @overload
+ def __eq__(
+ self: _CharArray[str_],
+ other: U_co,
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __eq__(
+ self: _CharArray[bytes_],
+ other: S_co,
+ ) -> NDArray[np.bool]: ...
+
+ @overload
+ def __ne__(
+ self: _CharArray[str_],
+ other: U_co,
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __ne__(
+ self: _CharArray[bytes_],
+ other: S_co,
+ ) -> NDArray[np.bool]: ...
+
+ @overload
+ def __ge__(
+ self: _CharArray[str_],
+ other: U_co,
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __ge__(
+ self: _CharArray[bytes_],
+ other: S_co,
+ ) -> NDArray[np.bool]: ...
+
+ @overload
+ def __le__(
+ self: _CharArray[str_],
+ other: U_co,
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __le__(
+ self: _CharArray[bytes_],
+ other: S_co,
+ ) -> NDArray[np.bool]: ...
+
+ @overload
+ def __gt__(
+ self: _CharArray[str_],
+ other: U_co,
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __gt__(
+ self: _CharArray[bytes_],
+ other: S_co,
+ ) -> NDArray[np.bool]: ...
+
+ @overload
+ def __lt__(
+ self: _CharArray[str_],
+ other: U_co,
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def __lt__(
+ self: _CharArray[bytes_],
+ other: S_co,
+ ) -> NDArray[np.bool]: ...
+
+ @overload
+ def __add__(
+ self: _CharArray[str_],
+ other: U_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def __add__(
+ self: _CharArray[bytes_],
+ other: S_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def __radd__(
+ self: _CharArray[str_],
+ other: U_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def __radd__(
+ self: _CharArray[bytes_],
+ other: S_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def center(
+ self: _CharArray[str_],
+ width: i_co,
+ fillchar: U_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def center(
+ self: _CharArray[bytes_],
+ width: i_co,
+ fillchar: S_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def count(
+ self: _CharArray[str_],
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def count(
+ self: _CharArray[bytes_],
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+
+ def decode(
+ self: _CharArray[bytes_],
+ encoding: str | None = ...,
+ errors: str | None = ...,
+ ) -> _CharArray[str_]: ...
+
+ def encode(
+ self: _CharArray[str_],
+ encoding: str | None = ...,
+ errors: str | None = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def endswith(
+ self: _CharArray[str_],
+ suffix: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def endswith(
+ self: _CharArray[bytes_],
+ suffix: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[np.bool]: ...
+
+ def expandtabs(
+ self,
+ tabsize: i_co = ...,
+ ) -> Self: ...
+
+ @overload
+ def find(
+ self: _CharArray[str_],
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def find(
+ self: _CharArray[bytes_],
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def index(
+ self: _CharArray[str_],
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def index(
+ self: _CharArray[bytes_],
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def join(
+ self: _CharArray[str_],
+ seq: U_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def join(
+ self: _CharArray[bytes_],
+ seq: S_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def ljust(
+ self: _CharArray[str_],
+ width: i_co,
+ fillchar: U_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def ljust(
+ self: _CharArray[bytes_],
+ width: i_co,
+ fillchar: S_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def lstrip(
+ self: _CharArray[str_],
+ chars: U_co | None = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def lstrip(
+ self: _CharArray[bytes_],
+ chars: S_co | None = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def partition(
+ self: _CharArray[str_],
+ sep: U_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def partition(
+ self: _CharArray[bytes_],
+ sep: S_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def replace(
+ self: _CharArray[str_],
+ old: U_co,
+ new: U_co,
+ count: i_co | None = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def replace(
+ self: _CharArray[bytes_],
+ old: S_co,
+ new: S_co,
+ count: i_co | None = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rfind(
+ self: _CharArray[str_],
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def rfind(
+ self: _CharArray[bytes_],
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def rindex(
+ self: _CharArray[str_],
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def rindex(
+ self: _CharArray[bytes_],
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def rjust(
+ self: _CharArray[str_],
+ width: i_co,
+ fillchar: U_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rjust(
+ self: _CharArray[bytes_],
+ width: i_co,
+ fillchar: S_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rpartition(
+ self: _CharArray[str_],
+ sep: U_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rpartition(
+ self: _CharArray[bytes_],
+ sep: S_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rsplit(
+ self: _CharArray[str_],
+ sep: U_co | None = ...,
+ maxsplit: i_co | None = ...,
+ ) -> NDArray[object_]: ...
+ @overload
+ def rsplit(
+ self: _CharArray[bytes_],
+ sep: S_co | None = ...,
+ maxsplit: i_co | None = ...,
+ ) -> NDArray[object_]: ...
+
+ @overload
+ def rstrip(
+ self: _CharArray[str_],
+ chars: U_co | None = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rstrip(
+ self: _CharArray[bytes_],
+ chars: S_co | None = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def split(
+ self: _CharArray[str_],
+ sep: U_co | None = ...,
+ maxsplit: i_co | None = ...,
+ ) -> NDArray[object_]: ...
+ @overload
+ def split(
+ self: _CharArray[bytes_],
+ sep: S_co | None = ...,
+ maxsplit: i_co | None = ...,
+ ) -> NDArray[object_]: ...
+
+ def splitlines(self, keepends: b_co | None = ...) -> NDArray[object_]: ...
+
+ @overload
+ def startswith(
+ self: _CharArray[str_],
+ prefix: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[np.bool]: ...
+ @overload
+ def startswith(
+ self: _CharArray[bytes_],
+ prefix: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+ ) -> NDArray[np.bool]: ...
+
+ @overload
+ def strip(
+ self: _CharArray[str_],
+ chars: U_co | None = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def strip(
+ self: _CharArray[bytes_],
+ chars: S_co | None = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def translate(
+ self: _CharArray[str_],
+ table: U_co,
+ deletechars: U_co | None = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def translate(
+ self: _CharArray[bytes_],
+ table: S_co,
+ deletechars: S_co | None = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ def zfill(self, width: i_co) -> Self: ...
+ def capitalize(self) -> Self: ...
+ def title(self) -> Self: ...
+ def swapcase(self) -> Self: ...
+ def lower(self) -> Self: ...
+ def upper(self) -> Self: ...
+ def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+ def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+ def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+ def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+ def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+ def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+ def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+ def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+ def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+
+# Comparison
+@overload
+def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
+@overload
+def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
+
+@overload
+def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
+@overload
+def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
+
+@overload
+def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
+@overload
+def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
+
+@overload
+def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
+@overload
+def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
+
+@overload
+def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
+@overload
+def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
+
+@overload
+def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
+@overload
+def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
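+
+# A usage sketch for the comparison functions above (illustrative values):
+#
+# >>> np.char.equal(np.array(['a', 'b']), 'a')
+# array([ True, False])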
+
+@overload
+def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ...
+@overload
+def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ...
+@overload
+def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ...
+@overload
+def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ...
+@overload
+def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ...
+@overload
+def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def mod(a: U_co, value: Any) -> NDArray[np.str_]: ...
+@overload
+def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ...
+@overload
+def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ...
+@overload
+def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ...
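+
+# A usage sketch for the arithmetic-style functions above (illustrative
+# values; the reprs assume fixed-width unicode inputs):
+#
+# >>> np.char.add(['num', 'py'], ['py', 'num'])
+# array(['numpy', 'pynum'], dtype='<U6')
+# >>> np.char.multiply('ab', 3)
+# array('ababab', dtype='<U6')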
+
+@overload
+def capitalize(a: U_co) -> NDArray[str_]: ...
+@overload
+def capitalize(a: S_co) -> NDArray[bytes_]: ...
+@overload
+def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
+@overload
+def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+@overload
+def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
+@overload
+def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
+
+def decode(
+ a: S_co,
+ encoding: str | None = ...,
+ errors: str | None = ...,
+) -> NDArray[str_]: ...
+def encode(
+ a: U_co | T_co,
+ encoding: str | None = ...,
+ errors: str | None = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ...
+@overload
+def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ...
+@overload
+def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ...
+@overload
+def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def join(sep: U_co, seq: U_co) -> NDArray[str_]: ...
+@overload
+def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ...
+@overload
+def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
+@overload
+def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+@overload
+def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
+@overload
+def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def lower(a: U_co) -> NDArray[str_]: ...
+@overload
+def lower(a: S_co) -> NDArray[bytes_]: ...
+@overload
+def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def lstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
+@overload
+def lstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
+@overload
+def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
+@overload
+def lstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def partition(a: U_co, sep: U_co) -> NDArray[str_]: ...
+@overload
+def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
+@overload
+def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def replace(
+ a: U_co,
+ old: U_co,
+ new: U_co,
+ count: i_co | None = ...,
+) -> NDArray[str_]: ...
+@overload
+def replace(
+ a: S_co,
+ old: S_co,
+ new: S_co,
+ count: i_co | None = ...,
+) -> NDArray[bytes_]: ...
+@overload
+def replace(
+ a: _StringDTypeSupportsArray,
+ old: _StringDTypeSupportsArray,
+ new: _StringDTypeSupportsArray,
+ count: i_co = ...,
+) -> _StringDTypeArray: ...
+@overload
+def replace(
+ a: T_co,
+ old: T_co,
+ new: T_co,
+ count: i_co = ...,
+) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def rjust(
+ a: U_co,
+ width: i_co,
+ fillchar: U_co = ...,
+) -> NDArray[str_]: ...
+@overload
+def rjust(
+ a: S_co,
+ width: i_co,
+ fillchar: S_co = ...,
+) -> NDArray[bytes_]: ...
+@overload
+def rjust(
+ a: _StringDTypeSupportsArray,
+ width: i_co,
+ fillchar: _StringDTypeSupportsArray = ...,
+) -> _StringDTypeArray: ...
+@overload
+def rjust(
+ a: T_co,
+ width: i_co,
+ fillchar: T_co = ...,
+) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ...
+@overload
+def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
+@overload
+def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def rsplit(
+ a: U_co,
+ sep: U_co | None = ...,
+ maxsplit: i_co | None = ...,
+) -> NDArray[object_]: ...
+@overload
+def rsplit(
+ a: S_co,
+ sep: S_co | None = ...,
+ maxsplit: i_co | None = ...,
+) -> NDArray[object_]: ...
+@overload
+def rsplit(
+ a: _StringDTypeSupportsArray,
+ sep: _StringDTypeSupportsArray | None = ...,
+ maxsplit: i_co | None = ...,
+) -> NDArray[object_]: ...
+@overload
+def rsplit(
+ a: T_co,
+ sep: T_co | None = ...,
+ maxsplit: i_co | None = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def rstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
+@overload
+def rstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
+@overload
+def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
+@overload
+def rstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def split(
+ a: U_co,
+ sep: U_co | None = ...,
+ maxsplit: i_co | None = ...,
+) -> NDArray[object_]: ...
+@overload
+def split(
+ a: S_co,
+ sep: S_co | None = ...,
+ maxsplit: i_co | None = ...,
+) -> NDArray[object_]: ...
+@overload
+def split(
+ a: _StringDTypeSupportsArray,
+ sep: _StringDTypeSupportsArray | None = ...,
+ maxsplit: i_co | None = ...,
+) -> NDArray[object_]: ...
+@overload
+def split(
+ a: T_co,
+ sep: T_co | None = ...,
+ maxsplit: i_co | None = ...,
+) -> NDArray[object_]: ...
+
+def splitlines(a: UST_co, keepends: b_co | None = ...) -> NDArray[np.object_]: ...
+
+@overload
+def strip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
+@overload
+def strip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
+@overload
+def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
+@overload
+def strip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def swapcase(a: U_co) -> NDArray[str_]: ...
+@overload
+def swapcase(a: S_co) -> NDArray[bytes_]: ...
+@overload
+def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def title(a: U_co) -> NDArray[str_]: ...
+@overload
+def title(a: S_co) -> NDArray[bytes_]: ...
+@overload
+def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def title(a: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def translate(
+ a: U_co,
+ table: str,
+ deletechars: str | None = ...,
+) -> NDArray[str_]: ...
+@overload
+def translate(
+ a: S_co,
+ table: str,
+ deletechars: str | None = ...,
+) -> NDArray[bytes_]: ...
+@overload
+def translate(
+ a: _StringDTypeSupportsArray,
+ table: str,
+ deletechars: str | None = ...,
+) -> _StringDTypeArray: ...
+@overload
+def translate(
+ a: T_co,
+ table: str,
+ deletechars: str | None = ...,
+) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def upper(a: U_co) -> NDArray[str_]: ...
+@overload
+def upper(a: S_co) -> NDArray[bytes_]: ...
+@overload
+def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def zfill(a: U_co, width: i_co) -> NDArray[str_]: ...
+@overload
+def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ...
+@overload
+def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ...
+@overload
+def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...
+
+# String information
+@overload
+def count(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def count(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def count(
+ a: T_co,
+ sub: T_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+@overload
+def endswith(
+ a: U_co,
+ suffix: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def endswith(
+ a: S_co,
+ suffix: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def endswith(
+ a: T_co,
+ suffix: T_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+
+@overload
+def find(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def find(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def find(
+ a: T_co,
+ sub: T_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+@overload
+def index(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def index(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def index(
+ a: T_co,
+ sub: T_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+def isalpha(a: UST_co) -> NDArray[np.bool]: ...
+def isalnum(a: UST_co) -> NDArray[np.bool]: ...
+def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ...
+def isdigit(a: UST_co) -> NDArray[np.bool]: ...
+def islower(a: UST_co) -> NDArray[np.bool]: ...
+def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ...
+def isspace(a: UST_co) -> NDArray[np.bool]: ...
+def istitle(a: UST_co) -> NDArray[np.bool]: ...
+def isupper(a: UST_co) -> NDArray[np.bool]: ...
+
+@overload
+def rfind(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def rfind(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def rfind(
+ a: T_co,
+ sub: T_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+@overload
+def rindex(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def rindex(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[int_]: ...
+@overload
+def rindex(
+ a: T_co,
+ sub: T_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
+
+@overload
+def startswith(
+ a: U_co,
+ prefix: U_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def startswith(
+ a: S_co,
+ prefix: S_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def startswith(
+ a: T_co,
+ suffix: T_co,
+ start: i_co = ...,
+ end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
+
+def str_len(A: UST_co) -> NDArray[int_]: ...
+
+# Overloads 1 and 2: str- or bytes-based array-likes
+# Overloads 3 and 4: arbitrary object with unicode=False (-> bytes_)
+# Overloads 5 and 6: arbitrary object with unicode=True (-> str_)
+# Overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_)
+@overload
+def array(
+ obj: U_co,
+ itemsize: int | None = ...,
+ copy: bool = ...,
+ unicode: L[True] | None = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def array(
+ obj: S_co,
+ itemsize: int | None = ...,
+ copy: bool = ...,
+ unicode: L[False] | None = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def array(
+ obj: object,
+ itemsize: int | None,
+ copy: bool,
+ unicode: L[False],
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def array(
+ obj: object,
+ itemsize: int | None = ...,
+ copy: bool = ...,
+ *,
+ unicode: L[False],
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def array(
+ obj: object,
+ itemsize: int | None,
+ copy: bool,
+ unicode: L[True],
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def array(
+ obj: object,
+ itemsize: int | None = ...,
+ copy: bool = ...,
+ *,
+ unicode: L[True],
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def array(
+ obj: object,
+ itemsize: int | None = ...,
+ copy: bool = ...,
+ unicode: bool | None = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[str_] | _CharArray[bytes_]: ...
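+
+# How the `array` overloads above are expected to resolve (a sketch;
+# `some_obj` stands in for an arbitrary object):
+# np.char.array(['abc']) -> _CharArray[str_] (overload 1)
+# np.char.array([b'abc']) -> _CharArray[bytes_] (overload 2)
+# np.char.array(some_obj, unicode=True) -> _CharArray[str_] (overloads 5-6)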
+
+@overload
+def asarray(
+ obj: U_co,
+ itemsize: int | None = ...,
+ unicode: L[True] | None = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def asarray(
+ obj: S_co,
+ itemsize: int | None = ...,
+ unicode: L[False] | None = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def asarray(
+ obj: object,
+ itemsize: int | None,
+ unicode: L[False],
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def asarray(
+ obj: object,
+ itemsize: int | None = ...,
+ *,
+ unicode: L[False],
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def asarray(
+ obj: object,
+ itemsize: int | None,
+ unicode: L[True],
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def asarray(
+ obj: object,
+ itemsize: int | None = ...,
+ *,
+ unicode: L[True],
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def asarray(
+ obj: object,
+ itemsize: int | None = ...,
+ unicode: bool | None = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[str_] | _CharArray[bytes_]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.py b/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.py
new file mode 100644
index 00000000..8e71e6d4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.py
@@ -0,0 +1,1498 @@
+"""
+Implementation of optimized einsum.
+
+"""
+import itertools
+import operator
+
+from numpy._core.multiarray import c_einsum
+from numpy._core.numeric import asanyarray, tensordot
+from numpy._core.overrides import array_function_dispatch
+
+__all__ = ['einsum', 'einsum_path']
+
+# importing string for string.ascii_letters would be too slow
+# the first import before caching has been measured to take 800 µs (#23777)
+# symbols begin with uppercase to match ASCII ordering and avoid sorting issues
+einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
+einsum_symbols_set = set(einsum_symbols)
+
+
+def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
+ """
+ Computes the number of FLOPS in the contraction.
+
+ Parameters
+ ----------
+ idx_contraction : iterable
+ The indices involved in the contraction
+ inner : bool
+ Does this contraction require an inner product?
+ num_terms : int
+ The number of terms in a contraction
+ size_dictionary : dict
+ The size of each of the indices in idx_contraction
+
+ Returns
+ -------
+ flop_count : int
+ The total number of FLOPS required for the contraction.
+
+ Examples
+ --------
+
+ >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
+ 30
+
+ >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
+ 60
+
+ """
+
+ overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
+ op_factor = max(1, num_terms - 1)
+ if inner:
+ op_factor += 1
+
+ return overall_size * op_factor
+
+def _compute_size_by_dict(indices, idx_dict):
+ """
+ Computes the product of the elements in indices based on the dictionary
+ idx_dict.
+
+ Parameters
+ ----------
+ indices : iterable
+ Indices to base the product on.
+ idx_dict : dictionary
+ Dictionary of index sizes
+
+ Returns
+ -------
+ ret : int
+ The resulting product.
+
+ Examples
+ --------
+ >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
+ 90
+
+ """
+ ret = 1
+ for i in indices:
+ ret *= idx_dict[i]
+ return ret
+
+
+def _find_contraction(positions, input_sets, output_set):
+ """
+ Finds the contraction for a given set of input and output sets.
+
+ Parameters
+ ----------
+ positions : iterable
+ Integer positions of terms used in the contraction.
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+
+ Returns
+ -------
+ new_result : set
+ The indices of the resulting contraction
+ remaining : list
+ List of sets that have not been contracted, the new set is appended to
+ the end of this list
+ idx_removed : set
+ Indices removed from the entire contraction
+ idx_contraction : set
+ The indices used in the current contraction
+
+ Examples
+ --------
+
+ # A simple dot product test case
+ >>> pos = (0, 1)
+ >>> isets = [set('ab'), set('bc')]
+ >>> oset = set('ac')
+ >>> _find_contraction(pos, isets, oset)
+ ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
+
+ # A more complex case with additional terms in the contraction
+ >>> pos = (0, 2)
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set('ac')
+ >>> _find_contraction(pos, isets, oset)
+ ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
+ """
+
+ idx_contract = set()
+ idx_remain = output_set.copy()
+ remaining = []
+ for ind, value in enumerate(input_sets):
+ if ind in positions:
+ idx_contract |= value
+ else:
+ remaining.append(value)
+ idx_remain |= value
+
+ new_result = idx_remain & idx_contract
+ idx_removed = (idx_contract - new_result)
+ remaining.append(new_result)
+
+ return (new_result, remaining, idx_removed, idx_contract)
+
+
+def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
+ """
+ Computes all possible pair contractions, sieves the results based
+ on ``memory_limit`` and returns the lowest-cost path. This algorithm
+ scales factorially with the number of elements in the list ``input_sets``.
+
+ Parameters
+ ----------
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+ idx_dict : dictionary
+ Dictionary of index sizes
+ memory_limit : int
+ The maximum number of elements in a temporary array
+
+ Returns
+ -------
+ path : list
+ The optimal contraction order within the memory limit constraint.
+
+ Examples
+ --------
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set()
+ >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+ >>> _optimal_path(isets, oset, idx_sizes, 5000)
+ [(0, 2), (0, 1)]
+ """
+
+ full_results = [(0, [], input_sets)]
+ for iteration in range(len(input_sets) - 1):
+ iter_results = []
+
+ # Compute all unique pairs
+ for curr in full_results:
+ cost, positions, remaining = curr
+ for con in itertools.combinations(
+ range(len(input_sets) - iteration), 2
+ ):
+
+ # Find the contraction
+ cont = _find_contraction(con, remaining, output_set)
+ new_result, new_input_sets, idx_removed, idx_contract = cont
+
+ # Sieve the results based on memory_limit
+ new_size = _compute_size_by_dict(new_result, idx_dict)
+ if new_size > memory_limit:
+ continue
+
+ # Build (total_cost, positions, indices_remaining)
+ total_cost = cost + _flop_count(
+ idx_contract, idx_removed, len(con), idx_dict
+ )
+ new_pos = positions + [con]
+ iter_results.append((total_cost, new_pos, new_input_sets))
+
+ # Update combinatorial list, if we did not find anything return best
+ # path + remaining contractions
+ if iter_results:
+ full_results = iter_results
+ else:
+ path = min(full_results, key=lambda x: x[0])[1]
+ path += [tuple(range(len(input_sets) - iteration))]
+ return path
+
+ # If we have not found anything return single einsum contraction
+ if len(full_results) == 0:
+ return [tuple(range(len(input_sets)))]
+
+ path = min(full_results, key=lambda x: x[0])[1]
+ return path
+
+def _parse_possible_contraction(
+ positions, input_sets, output_set, idx_dict,
+ memory_limit, path_cost, naive_cost
+ ):
+ """Compute the cost (removed size + flops) and resultant indices for
+ performing the contraction specified by ``positions``.
+
+ Parameters
+ ----------
+ positions : tuple of int
+ The locations of the proposed tensors to contract.
+ input_sets : list of sets
+ The indices found on each tensors.
+ output_set : set
+ The output indices of the expression.
+ idx_dict : dict
+ Mapping of each index to its size.
+ memory_limit : int
+ The total allowed size for an intermediary tensor.
+ path_cost : int
+ The contraction cost so far.
+ naive_cost : int
+ The cost of the unoptimized expression.
+
+ Returns
+ -------
+ cost : (int, int)
+ A tuple containing the size of any indices removed, and the flop cost.
+ positions : tuple of int
+ The locations of the proposed tensors to contract.
+ new_input_sets : list of sets
+ The resulting new list of indices if this proposed contraction
+ is performed.
+
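+ Examples
+ --------
+ A minimal sketch with made-up index sizes (set reprs depend on hash
+ order and may vary):
+
+ >>> isets = [set('ab'), set('bc')]
+ >>> _parse_possible_contraction((0, 1), isets, set('ac'),
+ ... {'a': 2, 'b': 3, 'c': 4}, 5000, 0, 1000)
+ [(-10, 48), (0, 1), [{'a', 'c'}]] # may vary
+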
+ """
+
+ # Find the contraction
+ contract = _find_contraction(positions, input_sets, output_set)
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
+
+ # Sieve the results based on memory_limit
+ new_size = _compute_size_by_dict(idx_result, idx_dict)
+ if new_size > memory_limit:
+ return None
+
+ # Build sort tuple
+ old_sizes = (
+ _compute_size_by_dict(input_sets[p], idx_dict) for p in positions
+ )
+ removed_size = sum(old_sizes) - new_size
+
+ # NB: removed_size used to be just the size of any removed indices i.e.:
+ # helpers.compute_size_by_dict(idx_removed, idx_dict)
+ cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
+ sort = (-removed_size, cost)
+
+ # Sieve based on total cost as well
+ if (path_cost + cost) > naive_cost:
+ return None
+
+ # Add contraction to possible choices
+ return [sort, positions, new_input_sets]
+
+
+def _update_other_results(results, best):
+ """Update the positions and provisional input_sets of ``results``
+ based on performing the contraction result ``best``. Remove any
+ involving the tensors contracted.
+
+ Parameters
+ ----------
+ results : list
+ List of contraction results produced by
+ ``_parse_possible_contraction``.
+ best : list
+ The best contraction of ``results`` i.e. the one that
+ will be performed.
+
+ Returns
+ -------
+ mod_results : list
+ The list of modified results, updated with outcome of
+ ``best`` contraction.
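+
+ Examples
+ --------
+ A minimal sketch with hypothetical cost tuples (values are illustrative):
+
+ >>> best = [(-10, 48), (0, 1), [{'x'}]]
+ >>> results = [((-4, 8), (2, 3), [{'a'}, {'b'}, {'q'}])]
+ >>> _update_other_results(results, best)
+ [((-4, 8), (0, 1), [{'x'}, {'q'}])]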
+ """
+
+ best_con = best[1]
+ bx, by = best_con
+ mod_results = []
+
+ for cost, (x, y), con_sets in results:
+
+ # Ignore results involving tensors just contracted
+ if x in best_con or y in best_con:
+ continue
+
+ # Update the input_sets
+ del con_sets[by - int(by > x) - int(by > y)]
+ del con_sets[bx - int(bx > x) - int(bx > y)]
+ con_sets.insert(-1, best[2][-1])
+
+ # Update the position indices
+ mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
+ mod_results.append((cost, mod_con, con_sets))
+
+ return mod_results
+
+def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
+ """
+ Finds the path by contracting the best pair until the input list is
+ exhausted. The best pair is found by minimizing the tuple
+ ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
+ matrix multiplication or inner product operations, then Hadamard-like
+ operations, and finally outer operations. Outer products are limited by
+ ``memory_limit``. This algorithm scales cubically with respect to the
+ number of elements in the list ``input_sets``.
+
+ Parameters
+ ----------
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+ idx_dict : dictionary
+ Dictionary of index sizes
+ memory_limit : int
+ The maximum number of elements in a temporary array
+
+ Returns
+ -------
+ path : list
+ The greedy contraction order within the memory limit constraint.
+
+ Examples
+ --------
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set()
+ >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+ >>> _greedy_path(isets, oset, idx_sizes, 5000)
+ [(0, 2), (0, 1)]
+ """
+
+ # Handle trivial cases that leaked through
+ if len(input_sets) == 1:
+ return [(0,)]
+ elif len(input_sets) == 2:
+ return [(0, 1)]
+
+ # Build up a naive cost
+ contract = _find_contraction(
+ range(len(input_sets)), input_sets, output_set
+ )
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
+ naive_cost = _flop_count(
+ idx_contract, idx_removed, len(input_sets), idx_dict
+ )
+
+ # Initially iterate over all pairs
+ comb_iter = itertools.combinations(range(len(input_sets)), 2)
+ known_contractions = []
+
+ path_cost = 0
+ path = []
+
+ for iteration in range(len(input_sets) - 1):
+
+ # Iterate over all pairs on the first step, only previously
+ # found pairs on subsequent steps
+ for positions in comb_iter:
+
+ # Always initially ignore outer products
+ if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
+ continue
+
+ result = _parse_possible_contraction(
+ positions, input_sets, output_set, idx_dict,
+ memory_limit, path_cost, naive_cost
+ )
+ if result is not None:
+ known_contractions.append(result)
+
+ # If we do not have an inner contraction, rescan pairs
+ # including outer products
+ if len(known_contractions) == 0:
+
+ # Then check the outer products
+ for positions in itertools.combinations(
+ range(len(input_sets)), 2
+ ):
+ result = _parse_possible_contraction(
+ positions, input_sets, output_set, idx_dict,
+ memory_limit, path_cost, naive_cost
+ )
+ if result is not None:
+ known_contractions.append(result)
+
+ # If we still did not find any remaining contractions,
+ # default back to einsum like behavior
+ if len(known_contractions) == 0:
+ path.append(tuple(range(len(input_sets))))
+ break
+
+ # Sort based on first index
+ best = min(known_contractions, key=lambda x: x[0])
+
+ # Now propagate as many unused contractions as possible
+ # to the next iteration
+ known_contractions = _update_other_results(known_contractions, best)
+
+ # Next iteration only compute contractions with the new tensor
+ # All other contractions have been accounted for
+ input_sets = best[2]
+ new_tensor_pos = len(input_sets) - 1
+ comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
+
+ # Update path and total cost
+ path.append(best[1])
+ path_cost += best[0][1]
+
+ return path
+
+
+def _can_dot(inputs, result, idx_removed):
+ """
+ Checks whether we can use a BLAS (np.tensordot) call and whether it is
+ beneficial to do so.
+
+ Parameters
+ ----------
+ inputs : list of str
+ Specifies the subscripts for summation.
+ result : str
+ Resulting summation.
+ idx_removed : set
+ Indices that are removed in the summation
+
+
+ Returns
+ -------
+ type : bool
+ Returns true if BLAS should and can be used, else False
+
+ Notes
+ -----
+ If the operation is BLAS level 1 or 2 and the data is not already
+ aligned, we default back to einsum, as the memory movement needed to
+ copy is more costly than the operation itself.
+
+
+ Examples
+ --------
+
+ # Standard GEMM operation
+ >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
+ True
+
+ # Can use the standard BLAS, but requires odd data movement
+ >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
+ False
+
+ # DDOT where the memory is not aligned
+ >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
+ False
+
+ """
+
+ # All `dot` calls remove indices
+ if len(idx_removed) == 0:
+ return False
+
+ # BLAS can only handle two operands
+ if len(inputs) != 2:
+ return False
+
+ input_left, input_right = inputs
+
+ for c in set(input_left + input_right):
+ # can't deal with repeated indices on same input or more than 2 total
+ nl, nr = input_left.count(c), input_right.count(c)
+ if (nl > 1) or (nr > 1) or (nl + nr > 2):
+ return False
+
+ # can't do implicit summation or dimension collapse e.g.
+ # "ab,bc->c" (implicitly sum over 'a')
+ # "ab,ca->ca" (take diagonal of 'a')
+ if nl + nr - 1 == int(c in result):
+ return False
+
+ # Build a few temporaries
+ set_left = set(input_left)
+ set_right = set(input_right)
+ keep_left = set_left - idx_removed
+ keep_right = set_right - idx_removed
+ rs = len(idx_removed)
+
+ # At this point we are a DOT, GEMV, or GEMM operation
+
+ # Handle inner products
+
+ # DDOT with aligned data
+ if input_left == input_right:
+ return True
+
+ # DDOT without aligned data (better to use einsum)
+ if set_left == set_right:
+ return False
+
+ # Handle the 4 possible (aligned) GEMV or GEMM cases
+
+ # GEMM or GEMV no transpose
+ if input_left[-rs:] == input_right[:rs]:
+ return True
+
+ # GEMM or GEMV transpose both
+ if input_left[:rs] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose right
+ if input_left[-rs:] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose left
+ if input_left[:rs] == input_right[:rs]:
+ return True
+
+ # Einsum is faster than GEMV if we have to copy data
+ if not keep_left or not keep_right:
+ return False
+
+ # We are a matrix-matrix product, but we need to copy data
+ return True
+
+
+def _parse_einsum_input(operands):
+ """
+ A reproduction of einsum's C-side parsing in Python.
+
+ Returns
+ -------
+ input_strings : str
+ Parsed input strings
+ output_string : str
+ Parsed output string
+ operands : list of array_like
+ The operands to use in the numpy contraction
+
+ Examples
+ --------
+ The operand list is simplified to reduce printing:
+
+ >>> np.random.seed(123)
+ >>> a = np.random.rand(4, 4)
+ >>> b = np.random.rand(4, 4, 4)
+ >>> _parse_einsum_input(('...a,...a->...', a, b))
+ ('za,xza', 'xz', [a, b]) # may vary
+
+ >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
+ ('za,xza', 'xz', [a, b]) # may vary
+ """
+
+ if len(operands) == 0:
+ raise ValueError("No input operands")
+
+ if isinstance(operands[0], str):
+ subscripts = operands[0].replace(" ", "")
+ operands = [asanyarray(v) for v in operands[1:]]
+
+ # Ensure all characters are valid
+ for s in subscripts:
+ if s in '.,->':
+ continue
+ if s not in einsum_symbols:
+ raise ValueError(f"Character {s} is not a valid symbol.")
+
+ else:
+ tmp_operands = list(operands)
+ operand_list = []
+ subscript_list = []
+ for p in range(len(operands) // 2):
+ operand_list.append(tmp_operands.pop(0))
+ subscript_list.append(tmp_operands.pop(0))
+
+ output_list = tmp_operands[-1] if len(tmp_operands) else None
+ operands = [asanyarray(v) for v in operand_list]
+ subscripts = ""
+ last = len(subscript_list) - 1
+ for num, sub in enumerate(subscript_list):
+ for s in sub:
+ if s is Ellipsis:
+ subscripts += "..."
+ else:
+ try:
+ s = operator.index(s)
+ except TypeError as e:
+ raise TypeError(
+ "For this input type lists must contain "
+ "either int or Ellipsis"
+ ) from e
+ subscripts += einsum_symbols[s]
+ if num != last:
+ subscripts += ","
+
+ if output_list is not None:
+ subscripts += "->"
+ for s in output_list:
+ if s is Ellipsis:
+ subscripts += "..."
+ else:
+ try:
+ s = operator.index(s)
+ except TypeError as e:
+ raise TypeError(
+ "For this input type lists must contain "
+ "either int or Ellipsis"
+ ) from e
+ subscripts += einsum_symbols[s]
+ # Check for proper "->"
+ if ("-" in subscripts) or (">" in subscripts):
+ invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
+ if invalid or (subscripts.count("->") != 1):
+ raise ValueError("Subscripts can only contain one '->'.")
+
+ # Parse ellipses
+ if "." in subscripts:
+ used = subscripts.replace(".", "").replace(",", "").replace("->", "")
+ unused = list(einsum_symbols_set - set(used))
+ ellipse_inds = "".join(unused)
+ longest = 0
+
+ if "->" in subscripts:
+ input_tmp, output_sub = subscripts.split("->")
+ split_subscripts = input_tmp.split(",")
+ out_sub = True
+ else:
+ split_subscripts = subscripts.split(',')
+ out_sub = False
+
+ for num, sub in enumerate(split_subscripts):
+ if "." in sub:
+ if (sub.count(".") != 3) or (sub.count("...") != 1):
+ raise ValueError("Invalid Ellipses.")
+
+ # Take into account numerical values
+ if operands[num].shape == ():
+ ellipse_count = 0
+ else:
+ ellipse_count = max(operands[num].ndim, 1)
+ ellipse_count -= (len(sub) - 3)
+
+ if ellipse_count > longest:
+ longest = ellipse_count
+
+ if ellipse_count < 0:
+ raise ValueError("Ellipses lengths do not match.")
+ elif ellipse_count == 0:
+ split_subscripts[num] = sub.replace('...', '')
+ else:
+ rep_inds = ellipse_inds[-ellipse_count:]
+ split_subscripts[num] = sub.replace('...', rep_inds)
+
+ subscripts = ",".join(split_subscripts)
+ if longest == 0:
+ out_ellipse = ""
+ else:
+ out_ellipse = ellipse_inds[-longest:]
+
+ if out_sub:
+ subscripts += "->" + output_sub.replace("...", out_ellipse)
+ else:
+ # Special care for outputless ellipses
+ output_subscript = ""
+ tmp_subscripts = subscripts.replace(",", "")
+ for s in sorted(set(tmp_subscripts)):
+ if s not in (einsum_symbols):
+ raise ValueError(f"Character {s} is not a valid symbol.")
+ if tmp_subscripts.count(s) == 1:
+ output_subscript += s
+ normal_inds = ''.join(sorted(set(output_subscript) -
+ set(out_ellipse)))
+
+ subscripts += "->" + out_ellipse + normal_inds
+
+ # Build the output string if it does not exist
+ if "->" in subscripts:
+ input_subscripts, output_subscript = subscripts.split("->")
+ else:
+ input_subscripts = subscripts
+ # Build output subscripts
+ tmp_subscripts = subscripts.replace(",", "")
+ output_subscript = ""
+ for s in sorted(set(tmp_subscripts)):
+ if s not in einsum_symbols:
+ raise ValueError(f"Character {s} is not a valid symbol.")
+ if tmp_subscripts.count(s) == 1:
+ output_subscript += s
+
+ # Make sure output subscripts are in the input
+ for char in output_subscript:
+ if output_subscript.count(char) != 1:
+ raise ValueError("Output character %s appeared more than once in "
+ "the output." % char)
+ if char not in input_subscripts:
+ raise ValueError(f"Output character {char} did not appear in the input")
+
+ # Make sure number operands is equivalent to the number of terms
+ if len(input_subscripts.split(',')) != len(operands):
+ raise ValueError("Number of einsum subscripts must be equal to the "
+ "number of operands.")
+
+ return (input_subscripts, output_subscript, operands)
+
+
+def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
+ # NOTE: technically, we should only dispatch on array-like arguments, not
+ # subscripts (given as strings). But separating operands into
+ # arrays/subscripts is a little tricky/slow (given einsum's two supported
+ # signatures), so as a practical shortcut we dispatch on everything.
+ # Strings will be ignored for dispatching since they don't define
+ # __array_function__.
+ return operands
+
+
+@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
+def einsum_path(*operands, optimize='greedy', einsum_call=False):
+ """
+ einsum_path(subscripts, *operands, optimize='greedy')
+
+ Evaluates the lowest cost contraction order for an einsum expression by
+ considering the creation of intermediate arrays.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation.
+ *operands : list of array_like
+ These are the arrays for the operation.
+ optimize : {bool, list, tuple, 'greedy', 'optimal'}
+ Choose the type of path. If a tuple is provided, the second argument is
+ assumed to be the maximum intermediate size created. If only a single
+ argument is provided the largest input or output array size is used
+ as a maximum intermediate size.
+
+ * if a list is given that starts with ``einsum_path``, uses this as the
+ contraction path
+ * if False, no optimization is performed
+ * if True, defaults to the 'greedy' algorithm
+ * 'optimal' An algorithm that combinatorially explores all possible
+ ways of contracting the listed tensors and chooses the least costly
+ path. Scales exponentially with the number of terms in the
+ contraction.
+ * 'greedy' An algorithm that chooses the best pair contraction
+ at each step. Effectively, this algorithm searches the largest inner,
+ Hadamard, and then outer products at each step. Scales cubically with
+ the number of terms in the contraction. Equivalent to the 'optimal'
+ path for most contractions.
+
+ Default is 'greedy'.
+
+ Returns
+ -------
+ path : list of tuples
+ A list representation of the einsum path.
+ string_repr : str
+ A printable representation of the einsum path.
+
+ Notes
+ -----
+ The resulting path indicates which terms of the input contraction should be
+ contracted first; the result of this contraction is then appended to the
+ end of the contraction list. This list can then be iterated over until all
+ intermediate contractions are complete.
+
+ See Also
+ --------
+ einsum, linalg.multi_dot
+
+ Examples
+ --------
+
+ We can begin with a chain dot example. In this case, it is optimal to
+ contract the ``b`` and ``c`` tensors first as represented by the first
+ element of the path ``(1, 2)``. The resulting tensor is added to the end
+ of the contraction and the remaining contraction ``(0, 1)`` is then
+ completed.
+
+ >>> np.random.seed(123)
+ >>> a = np.random.rand(2, 2)
+ >>> b = np.random.rand(2, 5)
+ >>> c = np.random.rand(5, 2)
+ >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
+ >>> print(path_info[0])
+ ['einsum_path', (1, 2), (0, 1)]
+ >>> print(path_info[1])
+ Complete contraction: ij,jk,kl->il # may vary
+ Naive scaling: 4
+ Optimized scaling: 3
+ Naive FLOP count: 1.600e+02
+ Optimized FLOP count: 5.600e+01
+ Theoretical speedup: 2.857
+ Largest intermediate: 4.000e+00 elements
+ -------------------------------------------------------------------------
+ scaling current remaining
+ -------------------------------------------------------------------------
+ 3 kl,jk->jl ij,jl->il
+ 3 jl,ij->il il->il
+
+
+ A more complex index transformation example.
+
+ >>> I = np.random.rand(10, 10, 10, 10)
+ >>> C = np.random.rand(10, 10)
+ >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
+ ... optimize='greedy')
+
+ >>> print(path_info[0])
+ ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
+ >>> print(path_info[1])
+ Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary
+ Naive scaling: 8
+ Optimized scaling: 5
+ Naive FLOP count: 8.000e+08
+ Optimized FLOP count: 8.000e+05
+ Theoretical speedup: 1000.000
+ Largest intermediate: 1.000e+04 elements
+ --------------------------------------------------------------------------
+ scaling current remaining
+ --------------------------------------------------------------------------
+ 5 abcd,ea->bcde fb,gc,hd,bcde->efgh
+ 5 bcde,fb->cdef gc,hd,cdef->efgh
+ 5 cdef,gc->defg hd,defg->efgh
+ 5 defg,hd->efgh efgh->efgh
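+
+ The returned path can then be handed back to `einsum` via its
+ ``optimize`` argument (a small usage sketch reusing the arrays above):
+
+ >>> path, _ = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
+ >>> np.einsum('ij,jk,kl->il', a, b, c, optimize=path).shape
+ (2, 2)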
+ """
+
+ # Figure out what the path really is
+ path_type = optimize
+ if path_type is True:
+ path_type = 'greedy'
+ if path_type is None:
+ path_type = False
+
+ explicit_einsum_path = False
+ memory_limit = None
+
+ # No optimization or a named path algorithm
+ if (path_type is False) or isinstance(path_type, str):
+ pass
+
+ # Given an explicit path
+ elif len(path_type) and (path_type[0] == 'einsum_path'):
+ explicit_einsum_path = True
+
+ # Path tuple with memory limit
+ elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
+ isinstance(path_type[1], (int, float))):
+ memory_limit = int(path_type[1])
+ path_type = path_type[0]
+
+ else:
+ raise TypeError(f"Did not understand the path: {str(path_type)}")
+
+ # Hidden option, only einsum should call this
+ einsum_call_arg = einsum_call
+
+ # Python side parsing
+ input_subscripts, output_subscript, operands = (
+ _parse_einsum_input(operands)
+ )
+
+ # Build a few useful list and sets
+ input_list = input_subscripts.split(',')
+ input_sets = [set(x) for x in input_list]
+ output_set = set(output_subscript)
+ indices = set(input_subscripts.replace(',', ''))
+
+ # Get length of each unique dimension and ensure all dimensions are correct
+ dimension_dict = {}
+ broadcast_indices = [[] for x in range(len(input_list))]
+ for tnum, term in enumerate(input_list):
+ sh = operands[tnum].shape
+ if len(sh) != len(term):
+ raise ValueError("Einstein sum subscript %s does not contain the "
+ "correct number of indices for operand %d."
+ % (input_list[tnum], tnum))
+ for cnum, char in enumerate(term):
+ dim = sh[cnum]
+
+ # Build out broadcast indices
+ if dim == 1:
+ broadcast_indices[tnum].append(char)
+
+ if char in dimension_dict.keys():
+ # For broadcasting cases we always want the largest dim size
+ if dimension_dict[char] == 1:
+ dimension_dict[char] = dim
+ elif dim not in (1, dimension_dict[char]):
+ raise ValueError("Size of label '%s' for operand %d (%d) "
+ "does not match previous terms (%d)."
+ % (char, tnum, dimension_dict[char], dim))
+ else:
+ dimension_dict[char] = dim
+
+ # Convert broadcast inds to sets
+ broadcast_indices = [set(x) for x in broadcast_indices]
+
+ # Compute size of each input array plus the output array
+ size_list = [_compute_size_by_dict(term, dimension_dict)
+ for term in input_list + [output_subscript]]
+ max_size = max(size_list)
+
+ if memory_limit is None:
+ memory_arg = max_size
+ else:
+ memory_arg = memory_limit
+
+ # Compute naive cost
+ # This isn't quite right, need to look into exactly how einsum does this
+ inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
+ naive_cost = _flop_count(
+ indices, inner_product, len(input_list), dimension_dict
+ )
+
+ # Compute the path
+ if explicit_einsum_path:
+ path = path_type[1:]
+ elif (
+ (path_type is False)
+ or (len(input_list) in [1, 2])
+ or (indices == output_set)
+ ):
+ # Nothing to be optimized, leave it to einsum
+ path = [tuple(range(len(input_list)))]
+ elif path_type == "greedy":
+ path = _greedy_path(
+ input_sets, output_set, dimension_dict, memory_arg
+ )
+ elif path_type == "optimal":
+ path = _optimal_path(
+ input_sets, output_set, dimension_dict, memory_arg
+ )
+ else:
+ raise KeyError(f"Path name {path_type} not found")
+
+ cost_list, scale_list, size_list, contraction_list = [], [], [], []
+
+ # Build contraction tuple (positions, gemm, einsum_str, remaining)
+ for cnum, contract_inds in enumerate(path):
+ # Make sure we remove inds from right to left
+ contract_inds = tuple(sorted(contract_inds, reverse=True))
+
+ contract = _find_contraction(contract_inds, input_sets, output_set)
+ out_inds, input_sets, idx_removed, idx_contract = contract
+
+ cost = _flop_count(
+ idx_contract, idx_removed, len(contract_inds), dimension_dict
+ )
+ cost_list.append(cost)
+ scale_list.append(len(idx_contract))
+ size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
+
+ bcast = set()
+ tmp_inputs = []
+ for x in contract_inds:
+ tmp_inputs.append(input_list.pop(x))
+ bcast |= broadcast_indices.pop(x)
+
+ new_bcast_inds = bcast - idx_removed
+
+ # If we're broadcasting, nix blas
+ if not len(idx_removed & bcast):
+ do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+ else:
+ do_blas = False
+
+ # Last contraction
+ if (cnum - len(path)) == -1:
+ idx_result = output_subscript
+ else:
+ sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
+ idx_result = "".join([x[1] for x in sorted(sort_result)])
+
+ input_list.append(idx_result)
+ broadcast_indices.append(new_bcast_inds)
+ einsum_str = ",".join(tmp_inputs) + "->" + idx_result
+
+ contraction = (
+ contract_inds, idx_removed, einsum_str, input_list[:], do_blas
+ )
+ contraction_list.append(contraction)
+
+ opt_cost = sum(cost_list) + 1
+
+ if len(input_list) != 1:
+ # Explicit "einsum_path" is usually trusted, but we detect this kind of
+ # mistake in order to prevent from returning an intermediate value.
+ raise RuntimeError(
+ f"Invalid einsum_path is specified: {len(input_list) - 1} more "
+ "operands has to be contracted.")
+
+ if einsum_call_arg:
+ return (operands, contraction_list)
+
+ # Return the path along with a nice string representation
+ overall_contraction = input_subscripts + "->" + output_subscript
+ header = ("scaling", "current", "remaining")
+
+ speedup = naive_cost / opt_cost
+ max_i = max(size_list)
+
+ path_print = f" Complete contraction: {overall_contraction}\n"
+ path_print += f" Naive scaling: {len(indices)}\n"
+ path_print += " Optimized scaling: %d\n" % max(scale_list)
+ path_print += f" Naive FLOP count: {naive_cost:.3e}\n"
+ path_print += f" Optimized FLOP count: {opt_cost:.3e}\n"
+ path_print += f" Theoretical speedup: {speedup:3.3f}\n"
+ path_print += f" Largest intermediate: {max_i:.3e} elements\n"
+ path_print += "-" * 74 + "\n"
+ path_print += "%6s %24s %40s\n" % header
+ path_print += "-" * 74
+
+ for n, contraction in enumerate(contraction_list):
+ inds, idx_rm, einsum_str, remaining, blas = contraction
+ remaining_str = ",".join(remaining) + "->" + output_subscript
+ path_run = (scale_list[n], einsum_str, remaining_str)
+ path_print += "\n%4d %24s %40s" % path_run
+
+ path = ['einsum_path'] + path
+ return (path, path_print)
+
+
+def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
+ # Arguably we dispatch on more arguments than we really should; see note in
+ # _einsum_path_dispatcher for why.
+ yield from operands
+ yield out
+
+
+# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
+def einsum(*operands, out=None, optimize=False, **kwargs):
+ """
+ einsum(subscripts, *operands, out=None, dtype=None, order='K',
+ casting='safe', optimize=False)
+
+ Evaluates the Einstein summation convention on the operands.
+
+ Using the Einstein summation convention, many common multi-dimensional,
+ linear algebraic array operations can be represented in a simple fashion.
+ In *implicit* mode `einsum` computes these values.
+
+ In *explicit* mode, `einsum` provides further flexibility to compute
+ other array operations that might not be considered classical Einstein
+ summation operations, by disabling, or forcing summation over specified
+ subscript labels.
+
+ See the notes and examples for clarification.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation as comma separated list of
+ subscript labels. An implicit (classical Einstein summation)
+ calculation is performed unless the explicit indicator '->' is
+ included as well as subscript labels of the precise output form.
+ operands : list of array_like
+ These are the arrays for the operation.
+ out : ndarray, optional
+ If provided, the calculation is done into this array.
+ dtype : {data-type, None}, optional
+ If provided, forces the calculation to use the data type specified.
+ Note that you may have to also give a more liberal `casting`
+ parameter to allow the conversions. Default is None.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the output. 'C' means it should
+ be C contiguous. 'F' means it should be Fortran contiguous,
+ 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+ 'K' means it should be as close to the layout of the inputs as
+ is possible, including arbitrarily permuted axes.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Setting this to
+ 'unsafe' is not recommended, as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Default is 'safe'.
+ optimize : {False, True, 'greedy', 'optimal'}, optional
+ Controls if intermediate optimization should occur. No optimization
+        will occur if False, and True will default to the 'greedy' algorithm.
+ Also accepts an explicit contraction list from the ``np.einsum_path``
+ function. See ``np.einsum_path`` for more details. Defaults to False.
+
+ Returns
+ -------
+ output : ndarray
+ The calculation based on the Einstein summation convention.
+
+ See Also
+ --------
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+    einops :
+        Similar verbose interface is provided by the
+        `einops <https://github.com/arogozhnikov/einops>`_ package to cover
+        additional operations: transpose, reshape/flatten, repeat/tile,
+        squeeze/unsqueeze and reductions.
+    opt_einsum :
+        The `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
+        package optimizes contraction order for einsum-like expressions
+        in a backend-agnostic manner.
+
+ Notes
+ -----
+ The Einstein summation convention can be used to compute
+ many multi-dimensional, linear algebraic array operations. `einsum`
+ provides a succinct way of representing these.
+
+ A non-exhaustive list of these operations,
+ which can be computed by `einsum`, is shown below along with examples:
+
+ * Trace of an array, :py:func:`numpy.trace`.
+ * Return a diagonal, :py:func:`numpy.diag`.
+ * Array axis summations, :py:func:`numpy.sum`.
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
+ * Matrix multiplication and dot product, :py:func:`numpy.matmul`
+ :py:func:`numpy.dot`.
+ * Vector inner and outer products, :py:func:`numpy.inner`
+ :py:func:`numpy.outer`.
+ * Broadcasting, element-wise and scalar multiplication,
+ :py:func:`numpy.multiply`.
+ * Tensor contractions, :py:func:`numpy.tensordot`.
+ * Chained array operations, in efficient calculation order,
+ :py:func:`numpy.einsum_path`.
+
+ The subscripts string is a comma-separated list of subscript labels,
+ where each label refers to a dimension of the corresponding operand.
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+ appears only once, it is not summed, so ``np.einsum('i', a)``
+ produces a view of ``a`` with no changes. A further example
+ ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication
+    and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`.
+ Repeated subscript labels in one operand take the diagonal.
+ For example, ``np.einsum('ii', a)`` is equivalent to
+    :py:func:`np.trace(a) <numpy.trace>`.
+
+ In *implicit mode*, the chosen subscripts are important
+ since the axes of the output are reordered alphabetically. This
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+ multiplication since subscript 'h' precedes subscript 'i'.
+
+ In *explicit mode* the output can be directly controlled by
+ specifying output subscript labels. This requires the
+ identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since
+ summing can be disabled or forced when required. The call
+    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) <numpy.sum>`
+ if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)``
+    is like :py:func:`np.diag(a) <numpy.diag>` if ``a`` is a square 2-D array.
+ The difference is that `einsum` does not allow broadcasting by default.
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+ order of the output subscript labels and therefore returns matrix
+ multiplication, unlike the example above in implicit mode.
+
+ To enable and control broadcasting, use an ellipsis. Default
+ NumPy-style broadcasting is done by adding an ellipsis
+ to the left of each term, like ``np.einsum('...ii->...i', a)``.
+ ``np.einsum('...i->...', a)`` is like
+    :py:func:`np.sum(a, axis=-1) <numpy.sum>` for array ``a`` of any shape.
+ To take the trace along the first and last axes,
+ you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+ product with the left-most indices instead of rightmost, one can do
+ ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+ When there is only one operand, no axes are summed, and no output
+ parameter is provided, a view into the operand is returned instead
+ of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+ produces a view (changed in version 1.10.0).
+
+ `einsum` also provides an alternative way to provide the subscripts and
+ operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+ If the output shape is not provided in this format `einsum` will be
+ calculated in implicit mode, otherwise it will be performed explicitly.
+ The examples below have corresponding `einsum` calls with the two
+ parameter methods.
+
+ Views returned from einsum are now writeable whenever the input array
+ is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+ of a 2D array.
+
+ Added the ``optimize`` argument which will optimize the contraction order
+ of an einsum expression. For a contraction with three or more operands
+ this can greatly increase the computational efficiency at the cost of
+ a larger memory footprint during computation.
+
+ Typically a 'greedy' algorithm is applied which empirical tests have shown
+ returns the optimal path in the majority of cases. In some cases 'optimal'
+ will return the superlative path through a more expensive, exhaustive
+ search. For iterative calculations it may be advisable to calculate
+ the optimal path once and reuse that path by supplying it as an argument.
+ An example is given below.
+
+ See :py:func:`numpy.einsum_path` for more details.
+
+ Examples
+ --------
+ >>> a = np.arange(25).reshape(5,5)
+ >>> b = np.arange(5)
+ >>> c = np.arange(6).reshape(2,3)
+
+ Trace of a matrix:
+
+ >>> np.einsum('ii', a)
+ 60
+ >>> np.einsum(a, [0,0])
+ 60
+ >>> np.trace(a)
+ 60
+
+ Extract the diagonal (requires explicit form):
+
+ >>> np.einsum('ii->i', a)
+ array([ 0, 6, 12, 18, 24])
+ >>> np.einsum(a, [0,0], [0])
+ array([ 0, 6, 12, 18, 24])
+ >>> np.diag(a)
+ array([ 0, 6, 12, 18, 24])
+
+ Sum over an axis (requires explicit form):
+
+ >>> np.einsum('ij->i', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [0,1], [0])
+ array([ 10, 35, 60, 85, 110])
+ >>> np.sum(a, axis=1)
+ array([ 10, 35, 60, 85, 110])
+
+ For higher dimensional arrays summing a single axis can be done
+ with ellipsis:
+
+ >>> np.einsum('...j->...', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+ array([ 10, 35, 60, 85, 110])
+
+ Compute a matrix transpose, or reorder any number of axes:
+
+ >>> np.einsum('ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum('ij->ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum(c, [1,0])
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.transpose(c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+
+ Vector inner products:
+
+ >>> np.einsum('i,i', b, b)
+ 30
+ >>> np.einsum(b, [0], b, [0])
+ 30
+ >>> np.inner(b,b)
+ 30
+
+ Matrix vector multiplication:
+
+ >>> np.einsum('ij,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum(a, [0,1], b, [1])
+ array([ 30, 80, 130, 180, 230])
+ >>> np.dot(a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum('...j,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+
+ Broadcasting and scalar multiplication:
+
+ >>> np.einsum('..., ...', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(',ij', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.multiply(3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+
+ Vector outer product:
+
+ >>> np.einsum('i,j', np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.einsum(np.arange(2)+1, [0], b, [1])
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.outer(np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+
+ Tensor contraction:
+
+ >>> a = np.arange(60.).reshape(3,4,5)
+ >>> b = np.arange(24.).reshape(4,3,2)
+ >>> np.einsum('ijk,jil->kl', a, b)
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
+ >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
+ >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
+
+ Writeable returned arrays (since version 1.10.0):
+
+ >>> a = np.zeros((3, 3))
+ >>> np.einsum('ii->i', a)[:] = 1
+ >>> a
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+
+ Example of ellipsis use:
+
+ >>> a = np.arange(6).reshape((3,2))
+ >>> b = np.arange(12).reshape((4,3))
+ >>> np.einsum('ki,jk->ij', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('ki,...k->i...', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('k...,jk', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+
+ Chained array operations. For more complicated contractions, speed ups
+ might be achieved by repeatedly computing a 'greedy' path or pre-computing
+ the 'optimal' path and repeatedly applying it, using an `einsum_path`
+ insertion (since version 1.12.0). Performance improvements can be
+ particularly significant with larger arrays:
+
+ >>> a = np.ones(64).reshape(2,4,8)
+
+ Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
+
+ >>> for iteration in range(500):
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
+
+ Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
+
+ >>> for iteration in range(500):
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a,
+ ... optimize='optimal')
+
+ Greedy `einsum` (faster optimal path approximation): ~160ms
+
+ >>> for iteration in range(500):
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
+
+ Optimal `einsum` (best usage pattern in some use cases): ~110ms
+
+ >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a,
+ ... optimize='optimal')[0]
+ >>> for iteration in range(500):
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
+
+ """
+ # Special handling if out is specified
+ specified_out = out is not None
+
+ # If no optimization, run pure einsum
+ if optimize is False:
+ if specified_out:
+ kwargs['out'] = out
+ return c_einsum(*operands, **kwargs)
+
+ # Check the kwargs to avoid a more cryptic error later, without having to
+ # repeat default values here
+ valid_einsum_kwargs = ['dtype', 'order', 'casting']
+ unknown_kwargs = [k for (k, v) in kwargs.items() if
+ k not in valid_einsum_kwargs]
+ if len(unknown_kwargs):
+ raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}")
+
+ # Build the contraction list and operand
+ operands, contraction_list = einsum_path(*operands, optimize=optimize,
+ einsum_call=True)
+
+ # Handle order kwarg for output array, c_einsum allows mixed case
+ output_order = kwargs.pop('order', 'K')
+ if output_order.upper() == 'A':
+ if all(arr.flags.f_contiguous for arr in operands):
+ output_order = 'F'
+ else:
+ output_order = 'C'
+
+ # Start contraction loop
+ for num, contraction in enumerate(contraction_list):
+ inds, idx_rm, einsum_str, remaining, blas = contraction
+ tmp_operands = [operands.pop(x) for x in inds]
+
+ # Do we need to deal with the output?
+ handle_out = specified_out and ((num + 1) == len(contraction_list))
+
+ # Call tensordot if still possible
+ if blas:
+ # Checks have already been handled
+ input_str, results_index = einsum_str.split('->')
+ input_left, input_right = input_str.split(',')
+
+ tensor_result = input_left + input_right
+ for s in idx_rm:
+ tensor_result = tensor_result.replace(s, "")
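+            # e.g. for einsum_str 'ij,jk->ik' with idx_rm == {'j'}:
+            # 'ij' + 'jk' == 'ijjk', and removing the contracted label
+            # leaves tensor_result == 'ik'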
+
+ # Find indices to contract over
+ left_pos, right_pos = [], []
+ for s in sorted(idx_rm):
+ left_pos.append(input_left.find(s))
+ right_pos.append(input_right.find(s))
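+            # e.g. contracting 'j' in 'ij,jk' gives left_pos == [1] and
+            # right_pos == [0], i.e. axes=((1,), (0,)) for the tensordot call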
+
+ # Contract!
+ new_view = tensordot(
+ *tmp_operands, axes=(tuple(left_pos), tuple(right_pos))
+ )
+
+ # Build a new view if needed
+ if (tensor_result != results_index) or handle_out:
+ if handle_out:
+ kwargs["out"] = out
+ new_view = c_einsum(
+ tensor_result + '->' + results_index, new_view, **kwargs
+ )
+
+ # Call einsum
+ else:
+ # If out was specified
+ if handle_out:
+ kwargs["out"] = out
+
+ # Do the contraction
+ new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
+
+ # Append new items and dereference what we can
+ operands.append(new_view)
+ del tmp_operands, new_view
+
+ if specified_out:
+ return out
+ else:
+ return asanyarray(operands[0], order=output_order)
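+
+# For illustration: the contraction loop above only changes the order in which
+# intermediates are formed, so (up to floating-point rounding) the optimized
+# result matches a single unoptimized call:
+#
+#     >>> a, b, c = np.ones((8, 8)), np.ones((8, 4)), np.ones((4, 8))
+#     >>> opt = np.einsum('ij,jk,kl->il', a, b, c, optimize='greedy')
+#     >>> ref = np.einsum('ij,jk,kl->il', a, b, c, optimize=False)
+#     >>> bool(np.allclose(opt, ref))
+#     True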
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.pyi
new file mode 100644
index 00000000..9653a26d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/einsumfunc.pyi
@@ -0,0 +1,184 @@
+from collections.abc import Sequence
+from typing import Any, Literal, TypeAlias, TypeVar, overload
+
+import numpy as np
+from numpy import _OrderKACF, number
+from numpy._typing import (
+ NDArray,
+ _ArrayLikeBool_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeUInt_co,
+ _DTypeLikeBool,
+ _DTypeLikeComplex,
+ _DTypeLikeComplex_co,
+ _DTypeLikeFloat,
+ _DTypeLikeInt,
+ _DTypeLikeObject,
+ _DTypeLikeUInt,
+)
+
+__all__ = ["einsum", "einsum_path"]
+
+_ArrayT = TypeVar(
+ "_ArrayT",
+ bound=NDArray[np.bool | number],
+)
+
+_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None
+_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"]
+_CastingUnsafe: TypeAlias = Literal["unsafe"]
+
+# TODO: Properly handle the `casting`-based combinatorics
+# TODO: We need to evaluate the content of `__subscripts` in order
+# to identify whether an array or a scalar is returned. At a cursory
+# glance this seems like something that can quite easily be done with
+# a mypy plugin.
+# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeBool_co,
+ out: None = ...,
+ dtype: _DTypeLikeBool | None = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeUInt_co,
+ out: None = ...,
+ dtype: _DTypeLikeUInt | None = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeInt_co,
+ out: None = ...,
+ dtype: _DTypeLikeInt | None = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeFloat_co,
+ out: None = ...,
+ dtype: _DTypeLikeFloat | None = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeComplex_co,
+ out: None = ...,
+ dtype: _DTypeLikeComplex | None = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: Any,
+ casting: _CastingUnsafe,
+ dtype: _DTypeLikeComplex_co | None = ...,
+ out: None = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeComplex_co,
+ out: _ArrayT,
+ dtype: _DTypeLikeComplex_co | None = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayT: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: Any,
+ out: _ArrayT,
+ casting: _CastingUnsafe,
+ dtype: _DTypeLikeComplex_co | None = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayT: ...
+
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeObject_co,
+ out: None = ...,
+ dtype: _DTypeLikeObject | None = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: Any,
+ casting: _CastingUnsafe,
+ dtype: _DTypeLikeObject | None = ...,
+ out: None = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeObject_co,
+ out: _ArrayT,
+ dtype: _DTypeLikeObject | None = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayT: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: Any,
+ out: _ArrayT,
+ casting: _CastingUnsafe,
+ dtype: _DTypeLikeObject | None = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayT: ...
+
+# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
+# It is therefore excluded from the signatures below.
+# NOTE: In practice the list consists of a `str` (first element)
+# and a variable number of integer tuples.
+def einsum_path(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeComplex_co | _DTypeLikeObject,
+ optimize: _OptimizeKind = "greedy",
+ einsum_call: Literal[False] = False,
+) -> tuple[list[Any], str]: ...
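+
+# For illustration (assuming these stubs are consumed by a static type checker
+# such as mypy): the ``out`` overloads propagate the concrete array type, e.g.
+# given ``x: NDArray[np.float64]`` and ``y: NDArray[np.float64]``,
+#
+#     reveal_type(einsum("ij->i", x))         # -> Any
+#     reveal_type(einsum("ij->i", x, out=y))  # -> the type of ``y`` (_ArrayT)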
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.py b/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.py
new file mode 100644
index 00000000..e20d774d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.py
@@ -0,0 +1,4269 @@
+"""Module containing non-deprecated functions borrowed from Numeric.
+
+"""
+import functools
+import types
+import warnings
+
+import numpy as np
+from numpy._utils import set_module
+
+from . import _methods, overrides
+from . import multiarray as mu
+from . import numerictypes as nt
+from . import umath as um
+from ._multiarray_umath import _array_converter
+from .multiarray import asanyarray, asarray, concatenate
+
+_dt_ = nt.sctype2char
+
+# functions that are methods
+__all__ = [
+ 'all', 'amax', 'amin', 'any', 'argmax',
+ 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
+ 'compress', 'cumprod', 'cumsum', 'cumulative_prod', 'cumulative_sum',
+ 'diagonal', 'mean', 'max', 'min', 'matrix_transpose',
+ 'ndim', 'nonzero', 'partition', 'prod', 'ptp', 'put',
+ 'ravel', 'repeat', 'reshape', 'resize', 'round',
+ 'searchsorted', 'shape', 'size', 'sort', 'squeeze',
+ 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
+]
+
+_gentype = types.GeneratorType
+# save away Python sum
+_sum_ = sum
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+# functions that are now methods
+def _wrapit(obj, method, *args, **kwds):
+ conv = _array_converter(obj)
+ # As this already tried the method, subok is maybe quite reasonable here
+ # but this follows what was done before. TODO: revisit this.
+ arr, = conv.as_arrays(subok=False)
+ result = getattr(arr, method)(*args, **kwds)
+
+ return conv.wrap(result, to_scalar=False)
+
+
+def _wrapfunc(obj, method, *args, **kwds):
+ bound = getattr(obj, method, None)
+ if bound is None:
+ return _wrapit(obj, method, *args, **kwds)
+
+ try:
+ return bound(*args, **kwds)
+ except TypeError:
+ # A TypeError occurs if the object does have such a method in its
+ # class, but its signature is not identical to that of NumPy's. This
+ # situation has occurred in the case of a downstream library like
+ # 'pandas'.
+ #
+ # Call _wrapit from within the except clause to ensure a potential
+ # exception has a traceback chain.
+ return _wrapit(obj, method, *args, **kwds)
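+
+# For illustration: plain Python sequences define no ``take``/``repeat``/...
+# bound methods, so ``_wrapfunc`` falls back to ``_wrapit``, which converts the
+# object to an ndarray, calls the method there, and wraps the result back;
+# e.g. ``np.take([4, 3, 5, 7], [0, 2])`` goes through the ``_wrapit`` path.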
+
+
+def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
+ passkwargs = {k: v for k, v in kwargs.items()
+ if v is not np._NoValue}
+
+ if type(obj) is not mu.ndarray:
+ try:
+ reduction = getattr(obj, method)
+ except AttributeError:
+ pass
+ else:
+ # This branch is needed for reductions like any which don't
+ # support a dtype.
+ if dtype is not None:
+ return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
+ else:
+ return reduction(axis=axis, out=out, **passkwargs)
+
+ return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
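+
+# For illustration: when ``obj`` is not an ndarray but defines a matching
+# reduction method, the call is handed to that method (``np.sum(obj, axis=0)``
+# ends up in ``obj.sum(axis=0, out=None)``); only otherwise does it fall
+# through to ``ufunc.reduce``.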
+
+
+def _wrapreduction_any_all(obj, ufunc, method, axis, out, **kwargs):
+ # Same as above function, but dtype is always bool (but never passed on)
+ passkwargs = {k: v for k, v in kwargs.items()
+ if v is not np._NoValue}
+
+ if type(obj) is not mu.ndarray:
+ try:
+ reduction = getattr(obj, method)
+ except AttributeError:
+ pass
+ else:
+ return reduction(axis=axis, out=out, **passkwargs)
+
+ return ufunc.reduce(obj, axis, bool, out, **passkwargs)
+
+
+def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
+ return (a, out)
+
+
+@array_function_dispatch(_take_dispatcher)
+def take(a, indices, axis=None, out=None, mode='raise'):
+ """
+ Take elements from an array along an axis.
+
+ When axis is not None, this function does the same thing as "fancy"
+ indexing (indexing arrays using arrays); however, it can be easier to use
+ if you need elements along a given axis. A call such as
+ ``np.take(arr, indices, axis=3)`` is equivalent to
+ ``arr[:,:,:,indices,...]``.
+
+ Explained without fancy indexing, this is equivalent to the following use
+ of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
+ indices::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ Nj = indices.shape
+ for ii in ndindex(Ni):
+ for jj in ndindex(Nj):
+ for kk in ndindex(Nk):
+ out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
+
+ Parameters
+ ----------
+ a : array_like (Ni..., M, Nk...)
+ The source array.
+ indices : array_like (Nj...)
+ The indices of the values to extract.
+        Scalar values are also allowed for `indices`.
+ axis : int, optional
+ The axis over which to select values. By default, the flattened
+ input array is used.
+ out : ndarray, optional (Ni..., Nj..., Nk...)
+ If provided, the result will be placed in this array. It should
+ be of the appropriate shape and dtype. Note that `out` is always
+ buffered if `mode='raise'`; use other modes for better performance.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices will behave.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ 'clip' mode means that all indices that are too large are replaced
+ by the index that addresses the last element along that axis. Note
+ that this disables indexing with negative numbers.
+
+ Returns
+ -------
+ out : ndarray (Ni..., Nj..., Nk...)
+ The returned array has the same type as `a`.
+
+ See Also
+ --------
+ compress : Take elements using a boolean mask
+ ndarray.take : equivalent method
+ take_along_axis : Take elements by matching the array and the index arrays
+
+ Notes
+ -----
+ By eliminating the inner loop in the description above, and using `s_` to
+ build simple slice objects, `take` can be expressed in terms of applying
+ fancy indexing to each 1-d slice::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+ out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
+
+ For this reason, it is equivalent to (but faster than) the following use
+ of `apply_along_axis`::
+
+ out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = [4, 3, 5, 7, 6, 8]
+ >>> indices = [0, 1, 4]
+ >>> np.take(a, indices)
+ array([4, 3, 6])
+
+ In this example if `a` is an ndarray, "fancy" indexing can be used.
+
+ >>> a = np.array(a)
+ >>> a[indices]
+ array([4, 3, 6])
+
+ If `indices` is not one dimensional, the output also has these dimensions.
+
+ >>> np.take(a, [[0, 1], [2, 3]])
+ array([[4, 3],
+ [5, 7]])
+ """
+ return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
+
+
+def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None,
+ copy=None):
+ return (a,)
+
+
+@array_function_dispatch(_reshape_dispatcher)
+def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None):
+ """
+ Gives a new shape to an array without changing its data.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be reshaped.
+ shape : int or tuple of ints
+ The new shape should be compatible with the original shape. If
+ an integer, then the result will be a 1-D array of that length.
+ One shape dimension can be -1. In this case, the value is
+ inferred from the length of the array and remaining dimensions.
+ order : {'C', 'F', 'A'}, optional
+ Read the elements of ``a`` using this index order, and place the
+ elements into the reshaped array using this index order. 'C'
+ means to read / write the elements using C-like index order,
+ with the last axis index changing fastest, back to the first
+ axis index changing slowest. 'F' means to read / write the
+ elements using Fortran-like index order, with the first index
+ changing fastest, and the last index changing slowest. Note that
+ the 'C' and 'F' options take no account of the memory layout of
+ the underlying array, and only refer to the order of indexing.
+ 'A' means to read / write the elements in Fortran-like index
+ order if ``a`` is Fortran *contiguous* in memory, C-like order
+ otherwise.
+ newshape : int or tuple of ints
+ .. deprecated:: 2.1
+ Replaced by ``shape`` argument. Retained for backward
+ compatibility.
+ copy : bool, optional
+ If ``True``, then the array data is copied. If ``None``, a copy will
+ only be made if it's required by ``order``. For ``False`` it raises
+ a ``ValueError`` if a copy cannot be avoided. Default: ``None``.
+
+ Returns
+ -------
+ reshaped_array : ndarray
+ This will be a new view object if possible; otherwise, it will
+ be a copy. Note there is no guarantee of the *memory layout* (C- or
+ Fortran- contiguous) of the returned array.
+
+ See Also
+ --------
+ ndarray.reshape : Equivalent method.
+
+ Notes
+ -----
+ It is not always possible to change the shape of an array without copying
+ the data.
+
+ The ``order`` keyword gives the index ordering both for *fetching*
+ the values from ``a``, and then *placing* the values into the output
+ array. For example, let's say you have an array:
+
+ >>> a = np.arange(6).reshape((3, 2))
+ >>> a
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+
+ You can think of reshaping as first raveling the array (using the given
+ index order), then inserting the elements from the raveled array into the
+ new array using the same kind of index ordering as was used for the
+ raveling.
+
+ >>> np.reshape(a, (2, 3)) # C-like index ordering
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
+ array([[0, 4, 3],
+ [2, 1, 5]])
+ >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
+ array([[0, 4, 3],
+ [2, 1, 5]])
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1,2,3], [4,5,6]])
+ >>> np.reshape(a, 6)
+ array([1, 2, 3, 4, 5, 6])
+ >>> np.reshape(a, 6, order='F')
+ array([1, 4, 2, 5, 3, 6])
+
+ >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ """
+ if newshape is None and shape is None:
+ raise TypeError(
+ "reshape() missing 1 required positional argument: 'shape'")
+ if newshape is not None:
+ if shape is not None:
+ raise TypeError(
+ "You cannot specify 'newshape' and 'shape' arguments "
+ "at the same time.")
+ # Deprecated in NumPy 2.1, 2024-04-18
+ warnings.warn(
+ "`newshape` keyword argument is deprecated, "
+ "use `shape=...` or pass shape positionally instead. "
+ "(deprecated in NumPy 2.1)",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ shape = newshape
+ if copy is not None:
+ return _wrapfunc(a, 'reshape', shape, order=order, copy=copy)
+ return _wrapfunc(a, 'reshape', shape, order=order)
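+
+# For illustration: per the ``copy`` parameter documented above, requesting a
+# no-copy reshape of data that cannot be expressed as a view raises, e.g.
+#
+#     >>> x = np.ones((3, 4))[:, :2]           # strided view of a larger array
+#     >>> np.reshape(x, (6,), copy=False)      # doctest: +SKIP
+#     Traceback (most recent call last):
+#         ...
+#     ValueError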
+
+
+def _choose_dispatcher(a, choices, out=None, mode=None):
+ yield a
+ yield from choices
+ yield out
+
+
+@array_function_dispatch(_choose_dispatcher)
+def choose(a, choices, out=None, mode='raise'):
+ """
+ Construct an array from an index array and a list of arrays to choose from.
+
+ First of all, if confused or uncertain, definitely look at the Examples -
+ in its full generality, this function is less simple than it might
+ seem from the following code description::
+
+ np.choose(a,c) == np.array([c[a[I]][I] for I in np.ndindex(a.shape)])
+
+ But this omits some subtleties. Here is a fully general summary:
+
+ Given an "index" array (`a`) of integers and a sequence of ``n`` arrays
+ (`choices`), `a` and each choice array are first broadcast, as necessary,
+ to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
+ 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
+ for each ``i``. Then, a new array with shape ``Ba.shape`` is created as
+ follows:
+
+ * if ``mode='raise'`` (the default), then, first of all, each element of
+ ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose
+ that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)``
+ position in ``Ba`` - then the value at the same position in the new array
+ is the value in ``Bchoices[i]`` at that same position;
+
+ * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
+ integer; modular arithmetic is used to map integers outside the range
+ `[0, n-1]` back into that range; and then the new array is constructed
+ as above;
+
+ * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed)
+ integer; negative integers are mapped to 0; values greater than ``n-1``
+ are mapped to ``n-1``; and then the new array is constructed as above.
+
+ Parameters
+ ----------
+ a : int array
+ This array must contain integers in ``[0, n-1]``, where ``n`` is the
+ number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
+ cases any integers are permissible.
+ choices : sequence of arrays
+ Choice arrays. `a` and all of the choices must be broadcastable to the
+ same shape. If `choices` is itself an array (not recommended), then
+ its outermost dimension (i.e., the one corresponding to
+ ``choices.shape[0]``) is taken as defining the "sequence".
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype. Note that `out` is always
+ buffered if ``mode='raise'``; use other modes for better performance.
+ mode : {'raise' (default), 'wrap', 'clip'}, optional
+ Specifies how indices outside ``[0, n-1]`` will be treated:
+
+ * 'raise' : an exception is raised
+ * 'wrap' : value becomes value mod ``n``
+ * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
+
+ Returns
+ -------
+ merged_array : array
+ The merged result.
+
+ Raises
+ ------
+ ValueError: shape mismatch
+ If `a` and each choice array are not all broadcastable to the same
+ shape.
+
+ See Also
+ --------
+ ndarray.choose : equivalent method
+ numpy.take_along_axis : Preferable if `choices` is an array
+
+ Notes
+ -----
+ To reduce the chance of misinterpretation, even though the following
+ "abuse" is nominally supported, `choices` should neither be, nor be
+ thought of as, a single array, i.e., the outermost sequence-like container
+ should be either a list or a tuple.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ ... [20, 21, 22, 23], [30, 31, 32, 33]]
+ >>> np.choose([2, 3, 1, 0], choices
+ ... # the first element of the result will be the first element of the
+ ... # third (2+1) "array" in choices, namely, 20; the second element
+ ... # will be the second element of the fourth (3+1) choice array, i.e.,
+ ... # 31, etc.
+ ... )
+ array([20, 31, 12, 3])
+ >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
+ array([20, 31, 12, 3])
+ >>> # because there are 4 choice arrays
+ >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
+ array([20, 1, 12, 3])
+ >>> # i.e., 0
+
+ A couple examples illustrating how choose broadcasts:
+
+ >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
+ >>> choices = [-10, 10]
+ >>> np.choose(a, choices)
+ array([[ 10, -10, 10],
+ [-10, 10, -10],
+ [ 10, -10, 10]])
+
+ >>> # With thanks to Anne Archibald
+ >>> a = np.array([0, 1]).reshape((2,1,1))
+ >>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
+ >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
+ >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
+ array([[[ 1, 1, 1, 1, 1],
+ [ 2, 2, 2, 2, 2],
+ [ 3, 3, 3, 3, 3]],
+ [[-1, -2, -3, -4, -5],
+ [-1, -2, -3, -4, -5],
+ [-1, -2, -3, -4, -5]]])
+
+ """
+ return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
+
+
+def _repeat_dispatcher(a, repeats, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_repeat_dispatcher)
+def repeat(a, repeats, axis=None):
+ """
+    Repeat each element of an array after itself.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ repeats : int or array of ints
+ The number of repetitions for each element. `repeats` is broadcasted
+ to fit the shape of the given axis.
+ axis : int, optional
+ The axis along which to repeat values. By default, use the
+ flattened input array, and return a flat output array.
+
+ Returns
+ -------
+ repeated_array : ndarray
+ Output array which has the same shape as `a`, except along
+ the given axis.
+
+ See Also
+ --------
+ tile : Tile an array.
+ unique : Find the unique elements of an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.repeat(3, 4)
+ array([3, 3, 3, 3])
+ >>> x = np.array([[1,2],[3,4]])
+ >>> np.repeat(x, 2)
+ array([1, 1, 2, 2, 3, 3, 4, 4])
+ >>> np.repeat(x, 3, axis=1)
+ array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 4, 4, 4]])
+ >>> np.repeat(x, [1, 2], axis=0)
+ array([[1, 2],
+ [3, 4],
+ [3, 4]])
+
+ """
+ return _wrapfunc(a, 'repeat', repeats, axis=axis)
+
+
+def _put_dispatcher(a, ind, v, mode=None):
+ return (a, ind, v)
+
+
+@array_function_dispatch(_put_dispatcher)
+def put(a, ind, v, mode='raise'):
+ """
+ Replaces specified elements of an array with given values.
+
+ The indexing works on the flattened target array. `put` is roughly
+ equivalent to:
+
+ ::
+
+ a.flat[ind] = v
+
+ Parameters
+ ----------
+ a : ndarray
+ Target array.
+ ind : array_like
+ Target indices, interpreted as integers.
+ v : array_like
+ Values to place in `a` at target indices. If `v` is shorter than
+ `ind` it will be repeated as necessary.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices will behave.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ 'clip' mode means that all indices that are too large are replaced
+ by the index that addresses the last element along that axis. Note
+ that this disables indexing with negative numbers. In 'raise' mode,
+ if an exception occurs the target array may still be modified.
+
+ See Also
+ --------
+ putmask, place
+ put_along_axis : Put elements by matching the array and the index arrays
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(5)
+ >>> np.put(a, [0, 2], [-44, -55])
+ >>> a
+ array([-44, 1, -55, 3, 4])
+
+ >>> a = np.arange(5)
+ >>> np.put(a, 22, -5, mode='clip')
+ >>> a
+ array([ 0, 1, 2, 3, -5])
+
+ """
+ try:
+ put = a.put
+ except AttributeError as e:
+ raise TypeError(f"argument 1 must be numpy.ndarray, not {type(a)}") from e
+
+ return put(ind, v, mode=mode)
+
+
+def _swapaxes_dispatcher(a, axis1, axis2):
+ return (a,)
+
+
+@array_function_dispatch(_swapaxes_dispatcher)
+def swapaxes(a, axis1, axis2):
+ """
+ Interchange two axes of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis1 : int
+ First axis.
+ axis2 : int
+ Second axis.
+
+ Returns
+ -------
+ a_swapped : ndarray
+ For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
+ returned; otherwise a new array is created. For earlier NumPy
+ versions a view of `a` is returned only if the order of the
+ axes is changed, otherwise the input array is returned.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([[1,2,3]])
+ >>> np.swapaxes(x,0,1)
+ array([[1],
+ [2],
+ [3]])
+
+ >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
+ >>> x
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ >>> np.swapaxes(x,0,2)
+ array([[[0, 4],
+ [2, 6]],
+ [[1, 5],
+ [3, 7]]])
+
+ """
+ return _wrapfunc(a, 'swapaxes', axis1, axis2)
+
+
+def _transpose_dispatcher(a, axes=None):
+ return (a,)
+
+
+@array_function_dispatch(_transpose_dispatcher)
+def transpose(a, axes=None):
+ """
+ Returns an array with axes transposed.
+
+ For a 1-D array, this returns an unchanged view of the original array, as a
+ transposed vector is simply the same vector.
+ To convert a 1-D array into a 2-D column vector, an additional dimension
+ must be added, e.g., ``np.atleast_2d(a).T`` achieves this, as does
+ ``a[:, np.newaxis]``.
+ For a 2-D array, this is the standard matrix transpose.
+ For an n-D array, if axes are given, their order indicates how the
+ axes are permuted (see Examples). If axes are not provided, then
+ ``transpose(a).shape == a.shape[::-1]``.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axes : tuple or list of ints, optional
+ If specified, it must be a tuple or list which contains a permutation
+ of [0, 1, ..., N-1] where N is the number of axes of `a`. Negative
+ indices can also be used to specify axes. The i-th axis of the returned
+ array will correspond to the axis numbered ``axes[i]`` of the input.
+ If not specified, defaults to ``range(a.ndim)[::-1]``, which reverses
+ the order of the axes.
+
+ Returns
+ -------
+ p : ndarray
+ `a` with its axes permuted. A view is returned whenever possible.
+
+ See Also
+ --------
+ ndarray.transpose : Equivalent method.
+ moveaxis : Move axes of an array to new positions.
+ argsort : Return the indices that would sort an array.
+
+ Notes
+ -----
+ Use ``transpose(a, argsort(axes))`` to invert the transposition of tensors
+ when using the `axes` keyword argument.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> np.transpose(a)
+ array([[1, 3],
+ [2, 4]])
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> a
+ array([1, 2, 3, 4])
+ >>> np.transpose(a)
+ array([1, 2, 3, 4])
+
+ >>> a = np.ones((1, 2, 3))
+ >>> np.transpose(a, (1, 0, 2)).shape
+ (2, 1, 3)
+
+ >>> a = np.ones((2, 3, 4, 5))
+ >>> np.transpose(a).shape
+ (5, 4, 3, 2)
+
+ >>> a = np.arange(3*4*5).reshape((3, 4, 5))
+ >>> np.transpose(a, (-1, 0, -2)).shape
+ (5, 3, 4)
+
+ """
+ return _wrapfunc(a, 'transpose', axes)
+
+
+def _matrix_transpose_dispatcher(x):
+ return (x,)
+
+@array_function_dispatch(_matrix_transpose_dispatcher)
+def matrix_transpose(x, /):
+ """
+ Transposes a matrix (or a stack of matrices) ``x``.
+
+ This function is Array API compatible.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array having shape (..., M, N) and whose two innermost
+ dimensions form ``MxN`` matrices.
+
+ Returns
+ -------
+ out : ndarray
+ An array containing the transpose for each matrix and having shape
+ (..., N, M).
+
+ See Also
+ --------
+ transpose : Generic transpose method.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.matrix_transpose([[1, 2], [3, 4]])
+ array([[1, 3],
+ [2, 4]])
+
+ >>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
+ array([[[1, 3],
+ [2, 4]],
+ [[5, 7],
+ [6, 8]]])
+
+ """
+ x = asanyarray(x)
+ if x.ndim < 2:
+ raise ValueError(
+ f"Input array must be at least 2-dimensional, but it is {x.ndim}"
+ )
+ return swapaxes(x, -1, -2)
+
+
+def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, kth, axis=-1, kind='introselect', order=None):
+ """
+ Return a partitioned copy of an array.
+
+ Creates a copy of the array and partially sorts it in such a way that
+ the value of the element in k-th position is in the position it would be
+ in a sorted array. In the output array, all elements smaller than the k-th
+ element are located to the left of this element and all equal or greater
+ are located to its right. The ordering of the elements in the two
+    partitions on either side of the k-th element in the output array is
+ undefined.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ kth : int or sequence of ints
+ Element index to partition by. The k-th value of the element
+ will be in its final sorted position and all smaller elements
+ will be moved before it and all equal or greater elements behind
+ it. The order of all elements in the partitions is undefined. If
+ provided with a sequence of k-th it will partition all elements
+ indexed by k-th of them into their sorted position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
+ axis : int or None, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument
+ specifies which fields to compare first, second, etc. A single
+ field can be specified as a string. Not all fields need be
+ specified, but unspecified fields will still be used, in the
+ order in which they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ partitioned_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ ndarray.partition : Method to sort an array in-place.
+ argpartition : Indirect partition.
+ sort : Full sorting
+
+ Notes
+ -----
+ The various selection algorithms are characterized by their average
+ speed, worst case performance, work space size, and whether they are
+ stable. A stable sort keeps items with the same key in the same
+ relative order. The available algorithms have the following
+ properties:
+
+ ================= ======= ============= ============ =======
+ kind speed worst case work space stable
+ ================= ======= ============= ============ =======
+ 'introselect' 1 O(n) 0 no
+ ================= ======= ============= ============ =======
+
+ All the partition algorithms make temporary copies of the data when
+ partitioning along any but the last axis. Consequently,
+ partitioning along the last axis is faster and uses less space than
+ partitioning along any other axis.
+
+ The sort order for complex numbers is lexicographic. If both the
+ real and imaginary parts are non-nan then the order is determined by
+ the real parts except when they are equal, in which case the order
+ is determined by the imaginary parts.
+
+    ``np.nan`` is treated as larger than ``np.inf`` in the sort order.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0])
+ >>> p = np.partition(a, 4)
+ >>> p
+ array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7]) # may vary
+
+ ``p[4]`` is 2; all elements in ``p[:4]`` are less than or equal
+ to ``p[4]``, and all elements in ``p[5:]`` are greater than or
+ equal to ``p[4]``. The partition is::
+
+ [0, 1, 2, 1], [2], [5, 2, 3, 3, 6, 7, 7, 7, 7]
+
+ The next example shows the use of multiple values passed to `kth`.
+
+ >>> p2 = np.partition(a, (4, 8))
+ >>> p2
+ array([0, 1, 2, 1, 2, 3, 3, 2, 5, 6, 7, 7, 7, 7])
+
+ ``p2[4]`` is 2 and ``p2[8]`` is 5. All elements in ``p2[:4]``
+ are less than or equal to ``p2[4]``, all elements in ``p2[5:8]``
+ are greater than or equal to ``p2[4]`` and less than or equal to
+ ``p2[8]``, and all elements in ``p2[9:]`` are greater than or
+ equal to ``p2[8]``. The partition is::
+
+ [0, 1, 2, 1], [2], [3, 3, 2], [5], [6, 7, 7, 7, 7]
+ """
+ if axis is None:
+ # flatten returns (1, N) for np.matrix, so always use the last axis
+ a = asanyarray(a).flatten()
+ axis = -1
+ else:
+ a = asanyarray(a).copy(order="K")
+ a.partition(kth, axis=axis, kind=kind, order=order)
+ return a
+
+
+def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argpartition_dispatcher)
+def argpartition(a, kth, axis=-1, kind='introselect', order=None):
+ """
+ Perform an indirect partition along the given axis using the
+ algorithm specified by the `kind` keyword. It returns an array of
+ indices of the same shape as `a` that index data along the given
+ axis in partitioned order.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to sort.
+ kth : int or sequence of ints
+ Element index to partition by. The k-th element will be in its
+ final sorted position and all smaller elements will be moved
+ before it and all larger elements behind it. The order of all
+ elements in the partitions is undefined. If provided with a
+ sequence of k-th it will partition all of them into their sorted
+ position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
+ axis : int or None, optional
+ Axis along which to sort. The default is -1 (the last axis). If
+ None, the flattened array is used.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument
+ specifies which fields to compare first, second, etc. A single
+ field can be specified as a string, and not all fields need be
+ specified, but unspecified fields will still be used, in the
+ order in which they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ index_array : ndarray, int
+ Array of indices that partition `a` along the specified axis.
+ If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+ always yields the partitioned `a`, irrespective of dimensionality.
+
+ See Also
+ --------
+ partition : Describes partition algorithms used.
+ ndarray.partition : Inplace partition.
+ argsort : Full indirect sort.
+ take_along_axis : Apply ``index_array`` from argpartition
+ to an array as if by calling partition.
+
+ Notes
+ -----
+ The returned indices are not guaranteed to be sorted according to
+ the values. Furthermore, the default selection algorithm ``introselect``
+ is unstable, and hence the returned indices are not guaranteed
+ to be the earliest/latest occurrence of the element.
+
+ `argpartition` works for real/complex inputs with nan values,
+ see `partition` for notes on the enhanced sort order and
+ different selection algorithms.
+
+ Examples
+ --------
+ One dimensional array:
+
+ >>> import numpy as np
+ >>> x = np.array([3, 4, 2, 1])
+ >>> x[np.argpartition(x, 3)]
+ array([2, 1, 3, 4]) # may vary
+ >>> x[np.argpartition(x, (1, 3))]
+ array([1, 2, 3, 4]) # may vary
+
+ >>> x = [3, 4, 2, 1]
+ >>> np.array(x)[np.argpartition(x, 3)]
+ array([2, 1, 3, 4]) # may vary
+
+ Multi-dimensional array:
+
+ >>> x = np.array([[3, 4, 2], [1, 3, 1]])
+ >>> index_array = np.argpartition(x, kth=1, axis=-1)
+ >>> # below is the same as np.partition(x, kth=1)
+ >>> np.take_along_axis(x, index_array, axis=-1)
+ array([[2, 3, 4],
+ [1, 1, 3]])
+
+ """
+ return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
+
+
+def _sort_dispatcher(a, axis=None, kind=None, order=None, *, stable=None):
+ return (a,)
+
+
+@array_function_dispatch(_sort_dispatcher)
+def sort(a, axis=-1, kind=None, order=None, *, stable=None):
+ """
+ Return a sorted copy of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ axis : int or None, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort or radix sort under the covers and,
+ in general, the actual implementation will vary with data type.
+ The 'mergesort' option is retained for backwards compatibility.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+ stable : bool, optional
+ Sort stability. If ``True``, the returned array will maintain
+ the relative order of ``a`` values which compare as equal.
+ If ``False`` or ``None``, this is not guaranteed. Internally,
+ this option selects ``kind='stable'``. Default: ``None``.
+
+ .. versionadded:: 2.0.0
+
+ Returns
+ -------
+ sorted_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ ndarray.sort : Method to sort an array in-place.
+ argsort : Indirect sort.
+ lexsort : Indirect stable sort on multiple keys.
+ searchsorted : Find elements in a sorted array.
+ partition : Partial sort.
+
+ Notes
+ -----
+ The various sorting algorithms are characterized by their average speed,
+ worst case performance, work space size, and whether they are stable. A
+ stable sort keeps items with the same key in the same relative
+ order. The four algorithms implemented in NumPy have the following
+ properties:
+
+ =========== ======= ============= ============ ========
+ kind speed worst case work space stable
+ =========== ======= ============= ============ ========
+ 'quicksort' 1 O(n^2) 0 no
+ 'heapsort' 3 O(n*log(n)) 0 no
+ 'mergesort' 2 O(n*log(n)) ~n/2 yes
+ 'timsort' 2 O(n*log(n)) ~n/2 yes
+ =========== ======= ============= ============ ========
+
+ .. note:: The datatype determines which of 'mergesort' or 'timsort'
+ is actually used, even if 'mergesort' is specified. User selection
+ at a finer scale is not currently available.
+
+ For performance, ``sort`` makes a temporary copy if needed to make the data
+    `contiguous <https://numpy.org/doc/stable/glossary.html#term-contiguous>`_
+ in memory along the sort axis. For even better performance and reduced
+ memory consumption, ensure that the array is already contiguous along the
+ sort axis.
+
+ The sort order for complex numbers is lexicographic. If both the real
+ and imaginary parts are non-nan then the order is determined by the
+ real parts except when they are equal, in which case the order is
+ determined by the imaginary parts.
+
+ Previous to numpy 1.4.0 sorting real and complex arrays containing nan
+ values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+ values are sorted to the end. The extended sort order is:
+
+ * Real: [R, nan]
+ * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+ where R is a non-nan real value. Complex values with the same nan
+ placements are sorted according to the non-nan part if it exists.
+ Non-nan values are sorted as before.
+
+ quicksort has been changed to:
+    `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+ When sorting does not make enough progress it switches to
+    `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+ This implementation makes quicksort O(n*log(n)) in the worst case.
+
+ 'stable' automatically chooses the best stable sorting algorithm
+ for the data type being sorted.
+ It, along with 'mergesort' is currently mapped to
+    `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+    or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+ depending on the data type.
+ API forward compatibility currently limits the
+ ability to select the implementation and it is hardwired for the different
+ data types.
+
+ Timsort is added for better performance on already or nearly
+ sorted data. On random data timsort is almost identical to
+ mergesort. It is now used for stable sort while quicksort is still the
+ default sort if none is chosen. For timsort details, refer to
+ `CPython listsort.txt
+    <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_
+ 'mergesort' and 'stable' are mapped to radix sort for integer data types.
+ Radix sort is an O(n) sort instead of O(n log n).
+
+ NaT now sorts to the end of arrays for consistency with NaN.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1,4],[3,1]])
+ >>> np.sort(a) # sort along the last axis
+ array([[1, 4],
+ [1, 3]])
+ >>> np.sort(a, axis=None) # sort the flattened array
+ array([1, 1, 3, 4])
+ >>> np.sort(a, axis=0) # sort along the first axis
+ array([[1, 1],
+ [3, 4]])
+
+ Use the `order` keyword to specify a field to use when sorting a
+ structured array:
+
+ >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
+ >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
+ ... ('Galahad', 1.7, 38)]
+ >>> a = np.array(values, dtype=dtype) # create a structured array
+    >>> np.sort(a, order='height')                        # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+           ('Lancelot', 1.8999999999999999, 38)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    Sort by age, then height if ages are equal:
+
+    >>> np.sort(a, order=['age', 'height'])               # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+           ('Arthur', 1.8, 41)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.sort(axis=axis, kind=kind, order=order, stable=stable)
+    return a
+
+
+def _argsort_dispatcher(a, axis=None, kind=None, order=None, *, stable=None):
+    return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind=None, order=None, *, stable=None):
+    """
+    Returns the indices that would sort an array.
+
+    Perform an indirect sort along the given axis using the algorithm
+    specified by the `kind` keyword. It returns an array of indices of the
+    same shape as `a` that index data along the given axis in sorted order.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both
+        'stable' and 'mergesort' use timsort under the covers and, in
+        general, the actual implementation will vary with data type.
+        The 'mergesort' option is retained for backwards compatibility.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+    stable : bool, optional
+        Sort stability. If ``True``, the returned array will maintain
+        the relative order of ``a`` values which compare as equal.
+        If ``False`` or ``None``, this is not guaranteed. Internally,
+        this option selects ``kind='stable'``. Default: ``None``.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that sort `a` along the specified `axis`.
+        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the sorted `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    sort : Describes sorting algorithms used.
+    lexsort : Indirect stable sort with multiple keys.
+    ndarray.sort : Inplace sort.
+    argpartition : Indirect partial sort.
+    take_along_axis : Apply ``index_array`` from argsort
+                      to an array as if by calling sort.
+
+    Notes
+    -----
+    See `sort` for notes on the different sorting algorithms.
+
+    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> import numpy as np
+ >>> x = np.array([3, 1, 2])
+ >>> np.argsort(x)
+ array([1, 2, 0])
+
+ Two-dimensional array:
+
+ >>> x = np.array([[0, 3], [2, 2]])
+ >>> x
+ array([[0, 3],
+ [2, 2]])
+
+ >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
+ >>> ind
+ array([[0, 1],
+ [1, 0]])
+ >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
+ array([[0, 2],
+ [2, 3]])
+
+ >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
+ >>> ind
+ array([[0, 1],
+ [0, 1]])
+ >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
+ array([[0, 3],
+ [2, 2]])
+
+ Indices of the sorted elements of a N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
+ >>> ind
+ (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
+ >>> x[ind] # same as np.sort(x, axis=None)
+ array([0, 2, 2, 3])
+
+ Sorting with keys:
+
+    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+    >>> x
+    array([(1, 0), (0, 1)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    >>> np.argsort(x, order=('x','y'))
+ array([1, 0])
+
+ >>> np.argsort(x, order=('y','x'))
+ array([0, 1])
+
+ """
+ return _wrapfunc(
+ a, 'argsort', axis=axis, kind=kind, order=order, stable=stable
+ )
+
+def _argmax_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
+ return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
+def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
+ """
+ Returns the indices of the maximum values along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ By default, the index is into the flattened array, otherwise
+ along the specified axis.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ index_array : ndarray of ints
+ Array of indices into the array. It has the same shape as ``a.shape``
+ with the dimension along `axis` removed. If `keepdims` is set to True,
+ then the size of `axis` will be 1 with the resulting array having same
+ shape as ``a.shape``.
+
+ See Also
+ --------
+ ndarray.argmax, argmin
+ amax : The maximum value along a given axis.
+ unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmax to an array as if by calling max.
+
+ Notes
+ -----
+ In case of multiple occurrences of the maximum values, the indices
+ corresponding to the first occurrence are returned.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(6).reshape(2,3) + 10
+ >>> a
+ array([[10, 11, 12],
+ [13, 14, 15]])
+ >>> np.argmax(a)
+ 5
+ >>> np.argmax(a, axis=0)
+ array([1, 1, 1])
+ >>> np.argmax(a, axis=1)
+ array([2, 2])
+
+ Indexes of the maximal elements of a N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
+ >>> ind
+ (1, 2)
+ >>> a[ind]
+ 15
+
+ >>> b = np.arange(6)
+ >>> b[1] = 5
+ >>> b
+ array([0, 5, 2, 3, 4, 5])
+ >>> np.argmax(b) # Only the first occurrence is returned.
+ 1
+
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmax(x, axis=-1)
+ >>> # Same as np.amax(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[4],
+ [3]])
+ >>> # Same as np.amax(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1),
+ ... axis=-1).squeeze(axis=-1)
+ array([4, 3])
+
+ Setting `keepdims` to `True`,
+
+ >>> x = np.arange(24).reshape((2, 3, 4))
+ >>> res = np.argmax(x, axis=1, keepdims=True)
+ >>> res.shape
+ (2, 1, 4)
+ """
+ kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
+ return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds)
+
+
+def _argmin_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
+ return (a, out)
+
+
+@array_function_dispatch(_argmin_dispatcher)
+def argmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+ """
+ Returns the indices of the minimum values along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ By default, the index is into the flattened array, otherwise
+ along the specified axis.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ index_array : ndarray of ints
+ Array of indices into the array. It has the same shape as `a.shape`
+ with the dimension along `axis` removed. If `keepdims` is set to True,
+ then the size of `axis` will be 1 with the resulting array having same
+ shape as `a.shape`.
+
+ See Also
+ --------
+ ndarray.argmin, argmax
+ amin : The minimum value along a given axis.
+ unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmin to an array as if by calling min.
+
+ Notes
+ -----
+ In case of multiple occurrences of the minimum values, the indices
+ corresponding to the first occurrence are returned.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(6).reshape(2,3) + 10
+ >>> a
+ array([[10, 11, 12],
+ [13, 14, 15]])
+ >>> np.argmin(a)
+ 0
+ >>> np.argmin(a, axis=0)
+ array([0, 0, 0])
+ >>> np.argmin(a, axis=1)
+ array([0, 0])
+
+ Indices of the minimum elements of a N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
+ >>> ind
+ (0, 0)
+ >>> a[ind]
+ 10
+
+ >>> b = np.arange(6) + 10
+ >>> b[4] = 10
+ >>> b
+ array([10, 11, 12, 13, 10, 15])
+ >>> np.argmin(b) # Only the first occurrence is returned.
+ 0
+
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmin(x, axis=-1)
+ >>> # Same as np.amin(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[2],
+ [0]])
+    >>> # Same as np.amin(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1),
+ ... axis=-1).squeeze(axis=-1)
+ array([2, 0])
+
+ Setting `keepdims` to `True`,
+
+ >>> x = np.arange(24).reshape((2, 3, 4))
+ >>> res = np.argmin(x, axis=1, keepdims=True)
+ >>> res.shape
+ (2, 1, 4)
+ """
+ kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
+ return _wrapfunc(a, 'argmin', axis=axis, out=out, **kwds)
+
+
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+ return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
+def searchsorted(a, v, side='left', sorter=None):
+ """
+ Find indices where elements should be inserted to maintain order.
+
+ Find the indices into a sorted array `a` such that, if the
+ corresponding elements in `v` were inserted before the indices, the
+ order of `a` would be preserved.
+
+ Assuming that `a` is sorted:
+
+ ====== ============================
+ `side` returned index `i` satisfies
+ ====== ============================
+ left ``a[i-1] < v <= a[i]``
+ right ``a[i-1] <= v < a[i]``
+ ====== ============================
+
+ Parameters
+ ----------
+ a : 1-D array_like
+ Input array. If `sorter` is None, then it must be sorted in
+ ascending order, otherwise `sorter` must be an array of indices
+ that sort it.
+ v : array_like
+ Values to insert into `a`.
+ side : {'left', 'right'}, optional
+ If 'left', the index of the first suitable location found is given.
+ If 'right', return the last such index. If there is no suitable
+ index, return either 0 or N (where N is the length of `a`).
+ sorter : 1-D array_like, optional
+ Optional array of integer indices that sort array a into ascending
+ order. They are typically the result of argsort.
+
+ Returns
+ -------
+ indices : int or array of ints
+ Array of insertion points with the same shape as `v`,
+ or an integer if `v` is a scalar.
+
+ See Also
+ --------
+ sort : Return a sorted copy of an array.
+ histogram : Produce histogram from 1-D data.
+
+ Notes
+ -----
+ Binary search is used to find the required insertion points.
+
+ As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
+ `nan` values. The enhanced sort order is documented in `sort`.
+
+ This function uses the same algorithm as the builtin python
+ `bisect.bisect_left` (``side='left'``) and `bisect.bisect_right`
+ (``side='right'``) functions, which is also vectorized
+ in the `v` argument.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.searchsorted([11,12,13,14,15], 13)
+ 2
+ >>> np.searchsorted([11,12,13,14,15], 13, side='right')
+ 3
+ >>> np.searchsorted([11,12,13,14,15], [-10, 20, 12, 13])
+ array([0, 5, 1, 2])
+
+ When `sorter` is used, the returned indices refer to the sorted
+ array of `a` and not `a` itself:
+
+ >>> a = np.array([40, 10, 20, 30])
+ >>> sorter = np.argsort(a)
+ >>> sorter
+ array([1, 2, 3, 0]) # Indices that would sort the array 'a'
+ >>> result = np.searchsorted(a, 25, sorter=sorter)
+ >>> result
+ 2
+ >>> a[sorter[result]]
+ 30 # The element at index 2 of the sorted array is 30.
+ """
+ return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
+
+
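+# Editorial sketch, not part of upstream NumPy: `_example_searchsorted_vs_bisect`
+# is a hypothetical helper that checks the claim in the Notes above that
+# `searchsorted` matches the builtin `bisect` module for both values of `side`.
+def _example_searchsorted_vs_bisect():
+    import bisect
+    a = [11, 12, 13, 14, 15]
+    # Left insertion point: first index i with a[i-1] < v <= a[i].
+    assert np.searchsorted(a, 13, side='left') == bisect.bisect_left(a, 13)
+    # Right insertion point: first index i with a[i-1] <= v < a[i].
+    assert np.searchsorted(a, 13, side='right') == bisect.bisect_right(a, 13)
+
+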
+def _resize_dispatcher(a, new_shape):
+ return (a,)
+
+
+@array_function_dispatch(_resize_dispatcher)
+def resize(a, new_shape):
+ """
+ Return a new array with the specified shape.
+
+ If the new array is larger than the original array, then the new
+ array is filled with repeated copies of `a`. Note that this behavior
+ is different from a.resize(new_shape) which fills with zeros instead
+ of repeated copies of `a`.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be resized.
+
+ new_shape : int or tuple of int
+ Shape of resized array.
+
+ Returns
+ -------
+ reshaped_array : ndarray
+ The new array is formed from the data in the old array, repeated
+ if necessary to fill out the required number of elements. The
+ data are repeated iterating over the array in C-order.
+
+ See Also
+ --------
+ numpy.reshape : Reshape an array without changing the total size.
+ numpy.pad : Enlarge and pad an array.
+ numpy.repeat : Repeat elements of an array.
+ ndarray.resize : resize an array in-place.
+
+ Notes
+ -----
+ When the total size of the array does not change `~numpy.reshape` should
+ be used. In most other cases either indexing (to reduce the size)
+ or padding (to increase the size) may be a more appropriate solution.
+
+ Warning: This functionality does **not** consider axes separately,
+ i.e. it does not apply interpolation/extrapolation.
+ It fills the return array with the required number of elements, iterating
+ over `a` in C-order, disregarding axes (and cycling back from the start if
+ the new shape is larger). This functionality is therefore not suitable to
+ resize images, or data where each axis represents a separate and distinct
+ entity.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[0,1],[2,3]])
+ >>> np.resize(a,(2,3))
+ array([[0, 1, 2],
+ [3, 0, 1]])
+ >>> np.resize(a,(1,4))
+ array([[0, 1, 2, 3]])
+ >>> np.resize(a,(2,4))
+ array([[0, 1, 2, 3],
+ [0, 1, 2, 3]])
+
+ """
+ if isinstance(new_shape, (int, nt.integer)):
+ new_shape = (new_shape,)
+
+ a = ravel(a)
+
+ new_size = 1
+ for dim_length in new_shape:
+ new_size *= dim_length
+ if dim_length < 0:
+ raise ValueError(
+ 'all elements of `new_shape` must be non-negative'
+ )
+
+ if a.size == 0 or new_size == 0:
+ # First case must zero fill. The second would have repeats == 0.
+ return np.zeros_like(a, shape=new_shape)
+
+ # ceiling division without negating new_size
+ repeats = (new_size + a.size - 1) // a.size
+ a = concatenate((a,) * repeats)[:new_size]
+
+ return reshape(a, new_shape)
+
+
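+# Editorial sketch, not part of upstream NumPy: `_example_resize_cycles_in_c_order`
+# is a hypothetical helper that spells out, with public functions only, how
+# `resize` above cycles the flattened input in C-order to fill the new shape.
+def _example_resize_cycles_in_c_order():
+    a = np.array([[0, 1], [2, 3]])
+    flat = a.ravel()                        # C-order: [0, 1, 2, 3]
+    repeats = -(-6 // flat.size)            # ceiling division, as in resize()
+    manual = np.concatenate((flat,) * repeats)[:6].reshape(2, 3)
+    assert (np.resize(a, (2, 3)) == manual).all()
+
+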
+def _squeeze_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_squeeze_dispatcher)
+def squeeze(a, axis=None):
+ """
+ Remove axes of length one from `a`.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Selects a subset of the entries of length one in the
+ shape. If an axis is selected with shape entry greater than
+ one, an error is raised.
+
+ Returns
+ -------
+ squeezed : ndarray
+ The input array, but with all or a subset of the
+ dimensions of length 1 removed. This is always `a` itself
+ or a view into `a`. Note that if all axes are squeezed,
+ the result is a 0d array and not a scalar.
+
+ Raises
+ ------
+ ValueError
+ If `axis` is not None, and an axis being squeezed is not of length 1
+
+ See Also
+ --------
+ expand_dims : The inverse operation, adding entries of length one
+ reshape : Insert, remove, and combine dimensions, and resize existing ones
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([[[0], [1], [2]]])
+ >>> x.shape
+ (1, 3, 1)
+ >>> np.squeeze(x).shape
+ (3,)
+ >>> np.squeeze(x, axis=0).shape
+ (3, 1)
+ >>> np.squeeze(x, axis=1).shape
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot select an axis to squeeze out which has size
+ not equal to one
+ >>> np.squeeze(x, axis=2).shape
+ (1, 3)
+ >>> x = np.array([[1234]])
+ >>> x.shape
+ (1, 1)
+ >>> np.squeeze(x)
+ array(1234) # 0d array
+ >>> np.squeeze(x).shape
+ ()
+ >>> np.squeeze(x)[()]
+ 1234
+
+ """
+ try:
+ squeeze = a.squeeze
+ except AttributeError:
+ return _wrapit(a, 'squeeze', axis=axis)
+ if axis is None:
+ return squeeze()
+ else:
+ return squeeze(axis=axis)
+
+
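+# Editorial sketch, not part of upstream NumPy: a hypothetical helper showing
+# that `expand_dims` undoes `squeeze`, as the See Also section above notes.
+def _example_squeeze_expand_dims_roundtrip():
+    x = np.zeros((1, 3, 1))
+    y = np.squeeze(x, axis=0)               # drop the leading length-1 axis
+    assert np.expand_dims(y, axis=0).shape == x.shape
+
+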
+def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
+ return (a,)
+
+
+@array_function_dispatch(_diagonal_dispatcher)
+def diagonal(a, offset=0, axis1=0, axis2=1):
+ """
+ Return specified diagonals.
+
+ If `a` is 2-D, returns the diagonal of `a` with the given offset,
+ i.e., the collection of elements of the form ``a[i, i+offset]``. If
+ `a` has more than two dimensions, then the axes specified by `axis1`
+ and `axis2` are used to determine the 2-D sub-array whose diagonal is
+ returned. The shape of the resulting array can be determined by
+ removing `axis1` and `axis2` and appending an index to the right equal
+ to the size of the resulting diagonals.
+
+ In versions of NumPy prior to 1.7, this function always returned a new,
+ independent array containing a copy of the values in the diagonal.
+
+ In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
+ but depending on this fact is deprecated. Writing to the resulting
+ array continues to work as it used to, but a FutureWarning is issued.
+
+ Starting in NumPy 1.9 it returns a read-only view on the original array.
+ Attempting to write to the resulting array will produce an error.
+
+ In some future release, it will return a read/write view and writing to
+ the returned array will alter your original array. The returned array
+ will have the same type as the input array.
+
+ If you don't write to the array returned by this function, then you can
+ just ignore all of the above.
+
+ If you depend on the current behavior, then we suggest copying the
+ returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
+ of just ``np.diagonal(a)``. This will work with both past and future
+ versions of NumPy.
+
+ Parameters
+ ----------
+ a : array_like
+ Array from which the diagonals are taken.
+ offset : int, optional
+ Offset of the diagonal from the main diagonal. Can be positive or
+ negative. Defaults to main diagonal (0).
+ axis1 : int, optional
+ Axis to be used as the first axis of the 2-D sub-arrays from which
+ the diagonals should be taken. Defaults to first axis (0).
+ axis2 : int, optional
+ Axis to be used as the second axis of the 2-D sub-arrays from
+ which the diagonals should be taken. Defaults to second axis (1).
+
+ Returns
+ -------
+ array_of_diagonals : ndarray
+ If `a` is 2-D, then a 1-D array containing the diagonal and of the
+ same type as `a` is returned unless `a` is a `matrix`, in which case
+ a 1-D array rather than a (2-D) `matrix` is returned in order to
+ maintain backward compatibility.
+
+ If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
+ are removed, and a new axis inserted at the end corresponding to the
+ diagonal.
+
+ Raises
+ ------
+ ValueError
+ If the dimension of `a` is less than 2.
+
+ See Also
+ --------
+ diag : MATLAB work-a-like for 1-D and 2-D arrays.
+ diagflat : Create diagonal arrays.
+ trace : Sum along diagonals.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(4).reshape(2,2)
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> a.diagonal()
+ array([0, 3])
+ >>> a.diagonal(1)
+ array([1])
+
+ A 3-D example:
+
+ >>> a = np.arange(8).reshape(2,2,2); a
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> a.diagonal(0, # Main diagonals of two arrays created by skipping
+ ... 0, # across the outer(left)-most axis last and
+ ... 1) # the "middle" (row) axis first.
+ array([[0, 6],
+ [1, 7]])
+
+ The sub-arrays whose main diagonals we just obtained; note that each
+ corresponds to fixing the right-most (column) axis, and that the
+ diagonals are "packed" in rows.
+
+ >>> a[:,:,0] # main diagonal is [0 6]
+ array([[0, 2],
+ [4, 6]])
+ >>> a[:,:,1] # main diagonal is [1 7]
+ array([[1, 3],
+ [5, 7]])
+
+ The anti-diagonal can be obtained by reversing the order of elements
+ using either `numpy.flipud` or `numpy.fliplr`.
+
+ >>> a = np.arange(9).reshape(3, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> np.fliplr(a).diagonal() # Horizontal flip
+ array([2, 4, 6])
+ >>> np.flipud(a).diagonal() # Vertical flip
+ array([6, 4, 2])
+
+ Note that the order in which the diagonal is retrieved varies depending
+ on the flip function.
+ """
+ if isinstance(a, np.matrix):
+ # Make diagonal of matrix 1-D to preserve backward compatibility.
+ return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+ else:
+ return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+
+
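+# Editorial sketch, not part of upstream NumPy: a hypothetical helper that
+# demonstrates the read-only view described in the docstring above and the
+# documented `.copy()` workaround.
+def _example_diagonal_is_read_only():
+    a = np.arange(4).reshape(2, 2)
+    view = a.diagonal()
+    try:
+        view[0] = 99                        # writing to the view raises
+    except ValueError:
+        pass
+    writable = a.diagonal().copy()          # the documented workaround
+    writable[0] = 99                        # fine on the copy
+
+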
+def _trace_dispatcher(
+ a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_trace_dispatcher)
+def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+ """
+ Return the sum along diagonals of the array.
+
+ If `a` is 2-D, the sum along its diagonal with the given offset
+ is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
+
+ If `a` has more than two dimensions, then the axes specified by axis1 and
+ axis2 are used to determine the 2-D sub-arrays whose traces are returned.
+ The shape of the resulting array is the same as that of `a` with `axis1`
+ and `axis2` removed.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, from which the diagonals are taken.
+ offset : int, optional
+ Offset of the diagonal from the main diagonal. Can be both positive
+ and negative. Defaults to 0.
+ axis1, axis2 : int, optional
+ Axes to be used as the first and second axis of the 2-D sub-arrays
+ from which the diagonals should be taken. Defaults are the first two
+ axes of `a`.
+ dtype : dtype, optional
+ Determines the data-type of the returned array and of the accumulator
+ where the elements are summed. If dtype has the value None and `a` is
+ of integer type of precision less than the default integer
+ precision, then the default integer precision is used. Otherwise,
+ the precision is the same as that of `a`.
+ out : ndarray, optional
+ Array into which the output is placed. Its type is preserved and
+ it must be of the right shape to hold the output.
+
+ Returns
+ -------
+ sum_along_diagonals : ndarray
+ If `a` is 2-D, the sum along the diagonal is returned. If `a` has
+ larger dimensions, then an array of sums along diagonals is returned.
+
+ See Also
+ --------
+ diag, diagonal, diagflat
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.trace(np.eye(3))
+ 3.0
+ >>> a = np.arange(8).reshape((2,2,2))
+ >>> np.trace(a)
+ array([6, 8])
+
+ >>> a = np.arange(24).reshape((2,2,2,3))
+ >>> np.trace(a).shape
+ (2, 3)
+
+ """
+ if isinstance(a, np.matrix):
+ # Get trace of matrix via an array to preserve backward compatibility.
+ return asarray(a).trace(
+ offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out
+ )
+ else:
+ return asanyarray(a).trace(
+ offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out
+ )
+
+
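+# Editorial sketch, not part of upstream NumPy: a hypothetical helper showing
+# that, for a 2-D array, `trace` equals the sum of the corresponding diagonal.
+def _example_trace_equals_diagonal_sum():
+    a = np.arange(9).reshape(3, 3)
+    assert np.trace(a, offset=1) == np.diagonal(a, offset=1).sum()
+
+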
+def _ravel_dispatcher(a, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_ravel_dispatcher)
+def ravel(a, order='C'):
+ """Return a contiguous flattened array.
+
+ A 1-D array, containing the elements of the input, is returned. A copy is
+ made only if needed.
+
+ As of NumPy 1.10, the returned array will have the same type as the input
+ array. (for example, a masked array will be returned for a masked array
+ input)
+
+ Parameters
+ ----------
+ a : array_like
+ Input array. The elements in `a` are read in the order specified by
+ `order`, and packed as a 1-D array.
+ order : {'C','F', 'A', 'K'}, optional
+
+ The elements of `a` are read using this index order. 'C' means
+ to index the elements in row-major, C-style order,
+ with the last axis index changing fastest, back to the first
+ axis index changing slowest. 'F' means to index the elements
+ in column-major, Fortran-style order, with the
+ first index changing fastest, and the last index changing
+ slowest. Note that the 'C' and 'F' options take no account of
+ the memory layout of the underlying array, and only refer to
+ the order of axis indexing. 'A' means to read the elements in
+ Fortran-like index order if `a` is Fortran *contiguous* in
+ memory, C-like order otherwise. 'K' means to read the
+ elements in the order they occur in memory, except for
+ reversing the data when strides are negative. By default, 'C'
+ index order is used.
+
+ Returns
+ -------
+ y : array_like
+ y is a contiguous 1-D array of the same subtype as `a`,
+ with shape ``(a.size,)``.
+ Note that matrices are special cased for backward compatibility,
+ if `a` is a matrix, then y is a 1-D ndarray.
+
+ See Also
+ --------
+ ndarray.flat : 1-D iterator over an array.
+ ndarray.flatten : 1-D array copy of the elements of an array
+ in row-major order.
+ ndarray.reshape : Change the shape of an array without changing its data.
+
+ Notes
+ -----
+ In row-major, C-style order, in two dimensions, the row index
+ varies the slowest, and the column index the quickest. This can
+ be generalized to multiple dimensions, where row-major order
+ implies that the index along the first axis varies slowest, and
+ the index along the last quickest. The opposite holds for
+ column-major, Fortran-style index ordering.
+
+ When a view is desired in as many cases as possible, ``arr.reshape(-1)``
+ may be preferable. However, ``ravel`` supports ``K`` in the optional
+ ``order`` argument while ``reshape`` does not.
+
+ Examples
+ --------
+ It is equivalent to ``reshape(-1, order=order)``.
+
+ >>> import numpy as np
+ >>> x = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> np.ravel(x)
+ array([1, 2, 3, 4, 5, 6])
+
+ >>> x.reshape(-1)
+ array([1, 2, 3, 4, 5, 6])
+
+ >>> np.ravel(x, order='F')
+ array([1, 4, 2, 5, 3, 6])
+
+ When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
+
+ >>> np.ravel(x.T)
+ array([1, 4, 2, 5, 3, 6])
+ >>> np.ravel(x.T, order='A')
+ array([1, 2, 3, 4, 5, 6])
+
+ When ``order`` is 'K', it will preserve orderings that are neither 'C'
+ nor 'F', but won't reverse axes:
+
+ >>> a = np.arange(3)[::-1]; a
+ array([2, 1, 0])
+ >>> a.ravel(order='C')
+ array([2, 1, 0])
+ >>> a.ravel(order='K')
+ array([2, 1, 0])
+
+ >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
+ array([[[ 0, 2, 4],
+ [ 1, 3, 5]],
+ [[ 6, 8, 10],
+ [ 7, 9, 11]]])
+ >>> a.ravel(order='C')
+ array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
+ >>> a.ravel(order='K')
+ array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+
+ """
+ if isinstance(a, np.matrix):
+ return asarray(a).ravel(order=order)
+ else:
+ return asanyarray(a).ravel(order=order)
+
+
+def _nonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_nonzero_dispatcher)
+def nonzero(a):
+ """
+ Return the indices of the elements that are non-zero.
+
+ Returns a tuple of arrays, one for each dimension of `a`,
+ containing the indices of the non-zero elements in that
+ dimension. The values in `a` are always tested and returned in
+ row-major, C-style order.
+
+ To group the indices by element, rather than dimension, use `argwhere`,
+ which returns a row for each non-zero element.
+
+ .. note::
+
+ When called on a zero-d array or scalar, ``nonzero(a)`` is treated
+ as ``nonzero(atleast_1d(a))``.
+
+ .. deprecated:: 1.17.0
+
+ Use `atleast_1d` explicitly if this behavior is deliberate.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+
+ Returns
+ -------
+ tuple_of_arrays : tuple
+ Indices of elements that are non-zero.
+
+ See Also
+ --------
+ flatnonzero :
+ Return indices that are non-zero in the flattened version of the input
+ array.
+ ndarray.nonzero :
+ Equivalent ndarray method.
+ count_nonzero :
+ Counts the number of non-zero elements in the input array.
+
+ Notes
+ -----
+ While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
+ recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
+ will correctly handle 0-d arrays.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
+ >>> x
+ array([[3, 0, 0],
+ [0, 4, 0],
+ [5, 6, 0]])
+ >>> np.nonzero(x)
+ (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
+
+ >>> x[np.nonzero(x)]
+ array([3, 4, 5, 6])
+ >>> np.transpose(np.nonzero(x))
+ array([[0, 0],
+ [1, 1],
+ [2, 0],
+ [2, 1]])
+
+    A common use for ``nonzero`` is to find the indices of an array where
+    a condition is True. Given an array `a`, the condition ``a > 3`` is a
+    boolean array and, since False is interpreted as 0, ``np.nonzero(a > 3)``
+    yields the indices of `a` where the condition is true.
+
+ >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ >>> a > 3
+ array([[False, False, False],
+ [ True, True, True],
+ [ True, True, True]])
+ >>> np.nonzero(a > 3)
+ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+ Using this result to index `a` is equivalent to using the mask directly:
+
+ >>> a[np.nonzero(a > 3)]
+ array([4, 5, 6, 7, 8, 9])
+ >>> a[a > 3] # prefer this spelling
+ array([4, 5, 6, 7, 8, 9])
+
+ ``nonzero`` can also be called as a method of the array.
+
+ >>> (a > 3).nonzero()
+ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+ """
+ return _wrapfunc(a, 'nonzero')
+
+
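+# Editorial sketch, not part of upstream NumPy: a hypothetical helper checking
+# the docstring's claim that transposing the result of `nonzero` gives the
+# row-per-element layout of `argwhere`.
+def _example_nonzero_vs_argwhere():
+    x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
+    assert (np.transpose(np.nonzero(x)) == np.argwhere(x)).all()
+
+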
+def _shape_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_shape_dispatcher)
+def shape(a):
+ """
+ Return the shape of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+
+ Returns
+ -------
+ shape : tuple of ints
+ The elements of the shape tuple give the lengths of the
+ corresponding array dimensions.
+
+ See Also
+ --------
+ len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
+ ``N>=1``.
+ ndarray.shape : Equivalent array method.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.shape(np.eye(3))
+ (3, 3)
+ >>> np.shape([[1, 3]])
+ (1, 2)
+ >>> np.shape([0])
+ (1,)
+ >>> np.shape(0)
+ ()
+
+ >>> a = np.array([(1, 2), (3, 4), (5, 6)],
+ ... dtype=[('x', 'i4'), ('y', 'i4')])
+ >>> np.shape(a)
+ (3,)
+ >>> a.shape
+ (3,)
+
+ """
+ try:
+ result = a.shape
+ except AttributeError:
+ result = asarray(a).shape
+ return result
+
+
+def _compress_dispatcher(condition, a, axis=None, out=None):
+ return (condition, a, out)
+
+
+@array_function_dispatch(_compress_dispatcher)
+def compress(condition, a, axis=None, out=None):
+ """
+ Return selected slices of an array along given axis.
+
+ When working along a given axis, a slice along that axis is returned in
+ `output` for each index where `condition` evaluates to True. When
+ working on a 1-D array, `compress` is equivalent to `extract`.
+
+ Parameters
+ ----------
+ condition : 1-D array of bools
+ Array that selects which entries to return. If len(condition)
+ is less than the size of `a` along the given axis, then output is
+ truncated to the length of the condition array.
+ a : array_like
+ Array from which to extract a part.
+ axis : int, optional
+ Axis along which to take slices. If None (default), work on the
+ flattened array.
+ out : ndarray, optional
+ Output array. Its type is preserved and it must be of the right
+ shape to hold the output.
+
+ Returns
+ -------
+ compressed_array : ndarray
+ A copy of `a` without the slices along axis for which `condition`
+ is false.
+
+ See Also
+ --------
+ take, choose, diag, diagonal, select
+ ndarray.compress : Equivalent method in ndarray
+ extract : Equivalent method when working on 1-D arrays
+ :ref:`ufuncs-output-type`
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4], [5, 6]])
+ >>> a
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ >>> np.compress([0, 1], a, axis=0)
+ array([[3, 4]])
+ >>> np.compress([False, True, True], a, axis=0)
+ array([[3, 4],
+ [5, 6]])
+ >>> np.compress([False, True], a, axis=1)
+ array([[2],
+ [4],
+ [6]])
+
+ Working on the flattened array does not return slices along an axis but
+ selects elements.
+
+ >>> np.compress([False, True], a)
+ array([2])
+
+ """
+ return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
+
+
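+# Editorial sketch, not part of upstream NumPy: a hypothetical helper showing
+# that, along an axis, `compress` matches boolean indexing with the same mask.
+def _example_compress_vs_boolean_indexing():
+    a = np.array([[1, 2], [3, 4], [5, 6]])
+    mask = np.array([False, True, True])
+    assert (np.compress(mask, a, axis=0) == a[mask]).all()
+
+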
+def _clip_dispatcher(a, a_min=None, a_max=None, out=None, *, min=None,
+ max=None, **kwargs):
+ return (a, a_min, a_max, out, min, max)
+
+
+@array_function_dispatch(_clip_dispatcher)
+def clip(a, a_min=np._NoValue, a_max=np._NoValue, out=None, *,
+ min=np._NoValue, max=np._NoValue, **kwargs):
+ """
+ Clip (limit) the values in an array.
+
+ Given an interval, values outside the interval are clipped to
+ the interval edges. For example, if an interval of ``[0, 1]``
+ is specified, values smaller than 0 become 0, and values larger
+ than 1 become 1.
+
+ Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``.
+
+ No check is performed to ensure ``a_min < a_max``.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing elements to clip.
+ a_min, a_max : array_like or None
+ Minimum and maximum value. If ``None``, clipping is not performed on
+ the corresponding edge. If both ``a_min`` and ``a_max`` are ``None``,
+ the elements of the returned array stay the same. Both are broadcasted
+ against ``a``.
+ out : ndarray, optional
+ The results will be placed in this array. It may be the input
+ array for in-place clipping. `out` must be of the right shape
+ to hold the output. Its type is preserved.
+ min, max : array_like or None
+ Array API compatible alternatives for ``a_min`` and ``a_max``
+ arguments. Either ``a_min`` and ``a_max`` or ``min`` and ``max``
+ can be passed at the same time. Default: ``None``.
+
+ .. versionadded:: 2.1.0
+ **kwargs
+ For other keyword-only arguments, see the
+        :ref:`ufunc docs <ufuncs.kwargs>`.
+
+ Returns
+ -------
+ clipped_array : ndarray
+ An array with the elements of `a`, but where values
+ < `a_min` are replaced with `a_min`, and those > `a_max`
+ with `a_max`.
+
+ See Also
+ --------
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ When `a_min` is greater than `a_max`, `clip` returns an
+ array in which all values are equal to `a_max`,
+ as shown in the second example.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.clip(a, 1, 8)
+ array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
+ >>> np.clip(a, 8, 1)
+ array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
+ >>> np.clip(a, 3, 6, out=a)
+ array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
+ >>> a
+ array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
+ array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
+
+ """
+ if a_min is np._NoValue and a_max is np._NoValue:
+ a_min = None if min is np._NoValue else min
+ a_max = None if max is np._NoValue else max
+ elif a_min is np._NoValue:
+ raise TypeError("clip() missing 1 required positional "
+ "argument: 'a_min'")
+ elif a_max is np._NoValue:
+ raise TypeError("clip() missing 1 required positional "
+ "argument: 'a_max'")
+ elif min is not np._NoValue or max is not np._NoValue:
+ raise ValueError("Passing `min` or `max` keyword argument when "
+ "`a_min` and `a_max` are provided is forbidden.")
+
+ return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
+
+
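+# Editorial sketch, not part of upstream NumPy: a hypothetical helper showing
+# the two equivalent keyword spellings accepted by `clip` since NumPy 2.1.
+def _example_clip_keyword_aliases():
+    a = np.arange(10)
+    # Array-API names `min`/`max` alias the positional `a_min`/`a_max`.
+    assert (np.clip(a, 1, 8) == np.clip(a, min=1, max=8)).all()
+    # One-sided clipping: leave the unused bound as None (or omit it).
+    assert (np.clip(a, None, 5) == np.clip(a, max=5)).all()
+
+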
+def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_sum_dispatcher)
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
+ """
+ Sum of array elements over a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Elements to sum.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a sum is performed. The default,
+ axis=None, will sum all of the elements of the input array. If
+ axis is negative it counts from the last to the first axis. If
+ axis is a tuple of ints, a sum is performed on all of the axes
+ specified in the tuple instead of a single axis or all the axes as
+ before.
+ dtype : dtype, optional
+ The type of the returned array and of the accumulator in which the
+ elements are summed. The dtype of `a` is used by default unless `a`
+ has an integer dtype of less precision than the default platform
+ integer. In that case, if `a` is signed then the platform integer
+ is used while if `a` is unsigned then an unsigned integer of the
+ same precision as the platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `sum` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+ where : array_like of bool, optional
+ Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+ Returns
+ -------
+ sum_along_axis : ndarray
+ An array with the same shape as `a`, with the specified
+ axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
+ is returned. If an output array is specified, a reference to
+ `out` is returned.
+
+ See Also
+ --------
+ ndarray.sum : Equivalent method.
+ add: ``numpy.add.reduce`` equivalent function.
+ cumsum : Cumulative sum of array elements.
+ trapezoid : Integration of array values using composite trapezoidal rule.
+
+ mean, average
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ The sum of an empty array is the neutral element 0:
+
+ >>> np.sum([])
+ 0.0
+
+ For floating point numbers the numerical precision of sum (and
+ ``np.add.reduce``) is in general limited by directly adding each number
+ individually to the result causing rounding errors in every step.
+ However, often numpy will use a numerically better approach (partial
+ pairwise summation) leading to improved precision in many use-cases.
+ This improved precision is always provided when no ``axis`` is given.
+ When ``axis`` is given, it will depend on which axis is summed.
+ Technically, to provide the best speed possible, the improved precision
+ is only used when the summation is along the fast axis in memory.
+ Note that the exact precision may vary depending on other parameters.
+ In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
+ more precise approach to summation.
+ Especially when summing a large number of lower precision floating point
+ numbers, such as ``float32``, numerical errors can become significant.
+ In such cases it can be advisable to use `dtype="float64"` to use a higher
+ precision for the output.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.sum([0.5, 1.5])
+ 2.0
+ >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
+ np.int32(1)
+ >>> np.sum([[0, 1], [0, 5]])
+ 6
+ >>> np.sum([[0, 1], [0, 5]], axis=0)
+ array([0, 6])
+ >>> np.sum([[0, 1], [0, 5]], axis=1)
+ array([1, 5])
+ >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
+ array([1., 5.])
+
+ If the accumulator is too small, overflow occurs:
+
+ >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
+ np.int8(-128)
+
+ You can also start the sum with a value other than zero:
+
+ >>> np.sum([10], initial=5)
+ 15
+ """
+ if isinstance(a, _gentype):
+ # 2018-02-25, 1.15.0
+ warnings.warn(
+ "Calling np.sum(generator) is deprecated, and in the future will "
+ "give a different result. Use np.sum(np.fromiter(generator)) or "
+ "the python sum builtin instead.",
+ DeprecationWarning, stacklevel=2
+ )
+
+ res = _sum_(a)
+ if out is not None:
+ out[...] = res
+ return out
+ return res
+
+ return _wrapreduction(
+ a, np.add, 'sum', axis, dtype, out,
+ keepdims=keepdims, initial=initial, where=where
+ )
+
+
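+# Editorial sketch, not part of upstream NumPy: a hypothetical helper
+# illustrating the Notes above -- a wider accumulator, selected via `dtype`,
+# keeps more precision when summing many float32 values. The two printed
+# totals typically differ slightly; exact figures vary by platform.
+def _example_sum_accumulator_dtype():
+    x = np.full(1_000_000, 0.1, dtype=np.float32)
+    print(np.sum(x))                        # accumulates in float32
+    print(np.sum(x, dtype=np.float64))      # accumulates in float64
+
+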
+def _any_dispatcher(a, axis=None, out=None, keepdims=None, *,
+ where=np._NoValue):
+ return (a, where, out)
+
+
+@array_function_dispatch(_any_dispatcher)
+def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
+ """
+ Test whether any array element along a given axis evaluates to True.
+
+    Returns a single boolean if `axis` is ``None``.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a logical OR reduction is performed.
+ The default (``axis=None``) is to perform a logical OR over all
+ the dimensions of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis. If this
+ is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output and its type is preserved
+ (e.g., if it is of type float, then it will remain so, returning
+ 1.0 for True and 0.0 for False, regardless of the type of `a`).
+ See :ref:`ufuncs-output-type` for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `any` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ where : array_like of bool, optional
+ Elements to include in checking for any `True` values.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ any : bool or ndarray
+ A new boolean or `ndarray` is returned unless `out` is specified,
+ in which case a reference to `out` is returned.
+
+ See Also
+ --------
+ ndarray.any : equivalent method
+
+ all : Test whether all elements along a given axis evaluate to True.
+
+ Notes
+ -----
+ Not a Number (NaN), positive infinity and negative infinity evaluate
+ to `True` because these are not equal to zero.
+
+ .. versionchanged:: 2.0
+ Before NumPy 2.0, ``any`` did not return booleans for object dtype
+ input arrays.
+ This behavior is still available via ``np.logical_or.reduce``.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.any([[True, False], [True, True]])
+ True
+
+ >>> np.any([[True, False, True ],
+ ... [False, False, False]], axis=0)
+ array([ True, False, True])
+
+ >>> np.any([-1, 0, 5])
+ True
+
+ >>> np.any([[np.nan], [np.inf]], axis=1, keepdims=True)
+ array([[ True],
+ [ True]])
+
+ >>> np.any([[True, False], [False, False]], where=[[False], [True]])
+ False
+
+ >>> a = np.array([[1, 0, 0],
+ ... [0, 0, 1],
+ ... [0, 0, 0]])
+ >>> np.any(a, axis=0)
+ array([ True, False, True])
+ >>> np.any(a, axis=1)
+ array([ True, True, False])
+
+ >>> o=np.array(False)
+ >>> z=np.any([-1, 4, 5], out=o)
+ >>> z, o
+ (array(True), array(True))
+ >>> # Check now that z is a reference to o
+ >>> z is o
+ True
+ >>> id(z), id(o) # identity of z and o # doctest: +SKIP
+ (191614240, 191614240)
+
+ """
+ return _wrapreduction_any_all(a, np.logical_or, 'any', axis, out,
+ keepdims=keepdims, where=where)
+
+
+def _all_dispatcher(a, axis=None, out=None, keepdims=None, *,
+ where=None):
+ return (a, where, out)
+
+
+@array_function_dispatch(_all_dispatcher)
+def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
+ """
+ Test whether all array elements along a given axis evaluate to True.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a logical AND reduction is performed.
+ The default (``axis=None``) is to perform a logical AND over all
+ the dimensions of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis. If this
+ is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternate output array in which to place the result.
+ It must have the same shape as the expected output and its
+ type is preserved (e.g., if ``dtype(out)`` is float, the result
+ will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type`
+ for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `all` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ where : array_like of bool, optional
+ Elements to include in checking for all `True` values.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ all : ndarray, bool
+ A new boolean or array is returned unless `out` is specified,
+ in which case a reference to `out` is returned.
+
+ See Also
+ --------
+ ndarray.all : equivalent method
+
+ any : Test whether any element along a given axis evaluates to True.
+
+ Notes
+ -----
+ Not a Number (NaN), positive infinity and negative infinity
+ evaluate to `True` because these are not equal to zero.
+
+ .. versionchanged:: 2.0
+ Before NumPy 2.0, ``all`` did not return booleans for object dtype
+ input arrays.
+ This behavior is still available via ``np.logical_and.reduce``.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.all([[True,False],[True,True]])
+ False
+
+ >>> np.all([[True,False],[True,True]], axis=0)
+ array([ True, False])
+
+ >>> np.all([-1, 4, 5])
+ True
+
+ >>> np.all([1.0, np.nan])
+ True
+
+ >>> np.all([[True, True], [False, True]], where=[[True], [False]])
+ True
+
+ >>> o=np.array(False)
+ >>> z=np.all([-1, 4, 5], out=o)
+ >>> id(z), id(o), z
+ (28293632, 28293632, array(True)) # may vary
+
+ """
+ return _wrapreduction_any_all(a, np.logical_and, 'all', axis, out,
+ keepdims=keepdims, where=where)
+
+
+def _cumulative_func(x, func, axis, dtype, out, include_initial):
+ x = np.atleast_1d(x)
+ x_ndim = x.ndim
+ if axis is None:
+ if x_ndim >= 2:
+ raise ValueError("For arrays which have more than one dimension "
+ "``axis`` argument is required.")
+ axis = 0
+
+ if out is not None and include_initial:
+ item = [slice(None)] * x_ndim
+ item[axis] = slice(1, None)
+ func.accumulate(x, axis=axis, dtype=dtype, out=out[tuple(item)])
+ item[axis] = 0
+ out[tuple(item)] = func.identity
+ return out
+
+ res = func.accumulate(x, axis=axis, dtype=dtype, out=out)
+ if include_initial:
+ initial_shape = list(x.shape)
+ initial_shape[axis] = 1
+ res = np.concat(
+ [np.full_like(res, func.identity, shape=initial_shape), res],
+ axis=axis,
+ )
+
+ return res
+
+
+def _cumulative_prod_dispatcher(x, /, *, axis=None, dtype=None, out=None,
+ include_initial=None):
+ return (x, out)
+
+
+@array_function_dispatch(_cumulative_prod_dispatcher)
+def cumulative_prod(x, /, *, axis=None, dtype=None, out=None,
+ include_initial=False):
+ """
+ Return the cumulative product of elements along a given axis.
+
+ This function is an Array API compatible alternative to `numpy.cumprod`.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative product is computed. The default
+ (None) is only allowed for one-dimensional arrays. For arrays
+ with more than one dimension ``axis`` is required.
+ dtype : dtype, optional
+ Type of the returned array, as well as of the accumulator in which
+ the elements are multiplied. If ``dtype`` is not specified, it
+ defaults to the dtype of ``x``, unless ``x`` has an integer dtype
+ with a precision less than that of the default platform integer.
+ In that case, the default platform integer is used instead.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type of the resulting values will be cast if necessary.
+ See :ref:`ufuncs-output-type` for more details.
+ include_initial : bool, optional
+ Boolean indicating whether to include the initial value (ones) as
+ the first value in the output. With ``include_initial=True``
+ the shape of the output is different than the shape of the input.
+ Default: ``False``.
+
+ Returns
+ -------
+ cumulative_prod_along_axis : ndarray
+ A new array holding the result is returned unless ``out`` is
+ specified, in which case a reference to ``out`` is returned. The
+ result has the same shape as ``x`` if ``include_initial=False``.
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ Examples
+ --------
+ >>> a = np.array([1, 2, 3])
+ >>> np.cumulative_prod(a) # intermediate results 1, 1*2
+ ... # total product 1*2*3 = 6
+ array([1, 2, 6])
+ >>> a = np.array([1, 2, 3, 4, 5, 6])
+ >>> np.cumulative_prod(a, dtype=float) # specify type of output
+ array([ 1., 2., 6., 24., 120., 720.])
+
+ The cumulative product for each column (i.e., over the rows) of ``b``:
+
+ >>> b = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> np.cumulative_prod(b, axis=0)
+ array([[ 1, 2, 3],
+ [ 4, 10, 18]])
+
+ The cumulative product for each row (i.e. over the columns) of ``b``:
+
+ >>> np.cumulative_prod(b, axis=1)
+ array([[ 1, 2, 6],
+ [ 4, 20, 120]])
+
+ """
+ return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial)
+
+
+def _cumulative_sum_dispatcher(x, /, *, axis=None, dtype=None, out=None,
+ include_initial=None):
+ return (x, out)
+
+
+@array_function_dispatch(_cumulative_sum_dispatcher)
+def cumulative_sum(x, /, *, axis=None, dtype=None, out=None,
+ include_initial=False):
+ """
+ Return the cumulative sum of the elements along a given axis.
+
+ This function is an Array API compatible alternative to `numpy.cumsum`.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative sum is computed. The default
+ (None) is only allowed for one-dimensional arrays. For arrays
+ with more than one dimension ``axis`` is required.
+ dtype : dtype, optional
+ Type of the returned array and of the accumulator in which the
+ elements are summed. If ``dtype`` is not specified, it defaults
+ to the dtype of ``x``, unless ``x`` has an integer dtype with
+ a precision less than that of the default platform integer.
+ In that case, the default platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type`
+ for more details.
+ include_initial : bool, optional
+ Boolean indicating whether to include the initial value (zeros) as
+ the first value in the output. With ``include_initial=True``
+ the shape of the output is different than the shape of the input.
+ Default: ``False``.
+
+ Returns
+ -------
+ cumulative_sum_along_axis : ndarray
+ A new array holding the result is returned unless ``out`` is
+ specified, in which case a reference to ``out`` is returned. The
+ result has the same shape as ``x`` if ``include_initial=False``.
+
+ See Also
+ --------
+ sum : Sum array elements.
+ trapezoid : Integration of array values using composite trapezoidal rule.
+ diff : Calculate the n-th discrete difference along given axis.
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ ``cumulative_sum(a)[-1]`` may not be equal to ``sum(a)`` for
+ floating-point values since ``sum`` may use a pairwise summation routine,
+ reducing the roundoff-error. See `sum` for more information.
+
+ Examples
+ --------
+ >>> a = np.array([1, 2, 3, 4, 5, 6])
+ >>> a
+ array([1, 2, 3, 4, 5, 6])
+ >>> np.cumulative_sum(a)
+ array([ 1, 3, 6, 10, 15, 21])
+ >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s)
+ array([ 1., 3., 6., 10., 15., 21.])
+
+ >>> b = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> np.cumulative_sum(b,axis=0) # sum over rows for each of the 3 columns
+ array([[1, 2, 3],
+ [5, 7, 9]])
+ >>> np.cumulative_sum(b,axis=1) # sum over columns for each of the 2 rows
+ array([[ 1, 3, 6],
+ [ 4, 9, 15]])
+
+ ``cumulative_sum(c)[-1]`` may not be equal to ``sum(c)``
+
+ >>> c = np.array([1, 2e-9, 3e-9] * 1000000)
+ >>> np.cumulative_sum(c)[-1]
+ 1000000.0050045159
+ >>> c.sum()
+ 1000000.0050000029
+
+ """
+ return _cumulative_func(x, um.add, axis, dtype, out, include_initial)
+
+
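+# Editorial sketch, not part of upstream NumPy: a hypothetical helper showing
+# the `include_initial` semantics above -- the additive identity (0) is
+# prepended, so the output is one element longer than the input.
+def _example_cumulative_sum_include_initial():
+    a = np.array([1, 2, 3])
+    out = np.cumulative_sum(a, include_initial=True)
+    assert out.shape == (4,)
+    assert (out == np.array([0, 1, 3, 6])).all()
+
+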
+def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumsum_dispatcher)
+def cumsum(a, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative sum of the elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative sum is computed. The default
+ (None) is to compute the cumsum over the flattened array.
+ dtype : dtype, optional
+ Type of the returned array and of the accumulator in which the
+ elements are summed. If `dtype` is not specified, it defaults
+ to the dtype of `a`, unless `a` has an integer dtype with a
+ precision less than that of the default platform integer. In
+ that case, the default platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type`
+ for more details.
+
+ Returns
+ -------
+ cumsum_along_axis : ndarray.
+ A new array holding the result is returned unless `out` is
+ specified, in which case a reference to `out` is returned. The
+ result has the same size as `a`, and the same shape as `a` if
+ `axis` is not None or `a` is a 1-d array.
+
+ See Also
+ --------
+ cumulative_sum : Array API compatible alternative for ``cumsum``.
+ sum : Sum array elements.
+ trapezoid : Integration of array values using composite trapezoidal rule.
+ diff : Calculate the n-th discrete difference along given axis.
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point
+ values since ``sum`` may use a pairwise summation routine, reducing
+ the roundoff-error. See `sum` for more information.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1,2,3], [4,5,6]])
+ >>> a
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.cumsum(a)
+ array([ 1, 3, 6, 10, 15, 21])
+ >>> np.cumsum(a, dtype=float) # specifies type of output value(s)
+ array([ 1., 3., 6., 10., 15., 21.])
+
+ >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
+ array([[1, 2, 3],
+ [5, 7, 9]])
+ >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
+ array([[ 1, 3, 6],
+ [ 4, 9, 15]])
+
+ ``cumsum(b)[-1]`` may not be equal to ``sum(b)``
+
+ >>> b = np.array([1, 2e-9, 3e-9] * 1000000)
+ >>> b.cumsum()[-1]
+ 1000000.0050045159
+ >>> b.sum()
+ 1000000.0050000029
+
+ """
+ return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
+
+
+def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_ptp_dispatcher)
+def ptp(a, axis=None, out=None, keepdims=np._NoValue):
+ """
+ Range of values (maximum - minimum) along an axis.
+
+ The name of the function comes from the acronym for 'peak to peak'.
+
+ .. warning::
+ `ptp` preserves the data type of the array. This means the
+ return value for an input of signed integers with n bits
+ (e.g. `numpy.int8`, `numpy.int16`, etc) is also a signed integer
+ with n bits. In that case, peak-to-peak values greater than
+ ``2**(n-1)-1`` will be returned as negative values. An example
+ with a work-around is shown below.
+
+ Parameters
+ ----------
+ a : array_like
+ Input values.
+ axis : None or int or tuple of ints, optional
+ Axis along which to find the peaks. By default, flatten the
+ array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : array_like
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type of the output values will be cast if necessary.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `ptp` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ Returns
+ -------
+ ptp : ndarray or scalar
+        The range of a given array: a scalar if the array is one-dimensional,
+        or a new array holding the result along the given axis.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([[4, 9, 2, 10],
+ ... [6, 9, 7, 12]])
+
+ >>> np.ptp(x, axis=1)
+ array([8, 6])
+
+ >>> np.ptp(x, axis=0)
+ array([2, 0, 5, 2])
+
+ >>> np.ptp(x)
+ 10
+
+ This example shows that a negative value can be returned when
+ the input is an array of signed integers.
+
+ >>> y = np.array([[1, 127],
+ ... [0, 127],
+ ... [-1, 127],
+ ... [-2, 127]], dtype=np.int8)
+ >>> np.ptp(y, axis=1)
+ array([ 126, 127, -128, -127], dtype=int8)
+
+ A work-around is to use the `view()` method to view the result as
+ unsigned integers with the same bit width:
+
+ >>> np.ptp(y, axis=1).view(np.uint8)
+ array([126, 127, 128, 129], dtype=uint8)
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ return _methods._ptp(a, axis=axis, out=out, **kwargs)
+
+
+def _max_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+ where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_max_dispatcher)
+@set_module('numpy')
+def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return the maximum of an array or maximum along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to operate. By default, flattened input is
+ used. If this is a tuple of ints, the maximum is selected over
+ multiple axes, instead of a single axis or all the axes as before.
+
+ out : ndarray, optional
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ See :ref:`ufuncs-output-type` for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the ``max`` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ initial : scalar, optional
+ The minimum value of an output element. Must be present to allow
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+ where : array_like of bool, optional
+ Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+ for details.
+
+ Returns
+ -------
+ max : ndarray or scalar
+ Maximum of `a`. If `axis` is None, the result is a scalar value.
+ If `axis` is an int, the result is an array of dimension
+ ``a.ndim - 1``. If `axis` is a tuple, the result is an array of
+ dimension ``a.ndim - len(axis)``.
+
+ See Also
+ --------
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+ maximum :
+ Element-wise maximum of two arrays, propagating any NaNs.
+ fmax :
+ Element-wise maximum of two arrays, ignoring any NaNs.
+ argmax :
+ Return the indices of the maximum values.
+
+ nanmin, minimum, fmin
+
+ Notes
+ -----
+ NaN values are propagated, that is if at least one item is NaN, the
+ corresponding max value will be NaN as well. To ignore NaN values
+ (MATLAB behavior), please use nanmax.
+
+ Don't use `~numpy.max` for element-wise comparison of 2 arrays; when
+ ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
+ ``max(a, axis=0)``.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(4).reshape((2,2))
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> np.max(a) # Maximum of the flattened array
+ 3
+ >>> np.max(a, axis=0) # Maxima along the first axis
+ array([2, 3])
+ >>> np.max(a, axis=1) # Maxima along the second axis
+ array([1, 3])
+ >>> np.max(a, where=[False, True], initial=-1, axis=0)
+ array([-1, 3])
+ >>> b = np.arange(5, dtype=float)
+ >>> b[2] = np.nan
+ >>> np.max(b)
+ np.float64(nan)
+ >>> np.max(b, where=~np.isnan(b), initial=-1)
+ 4.0
+ >>> np.nanmax(b)
+ 4.0
+
+ You can use an initial value to compute the maximum of an empty slice, or
+ to initialize it to a different value:
+
+ >>> np.max([[-50], [10]], axis=-1, initial=0)
+ array([ 0, 10])
+
+    Notice that the initial value is used as one of the elements for which
+    the maximum is determined, unlike the ``default`` argument of Python's
+    ``max`` function, which is only used for empty iterables.
+
+ >>> np.max([5], initial=6)
+ 6
+ >>> max([5], default=6)
+ 5
+ """
+ return _wrapreduction(a, np.maximum, 'max', axis, None, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+
+@array_function_dispatch(_max_dispatcher)
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return the maximum of an array or maximum along an axis.
+
+ `amax` is an alias of `~numpy.max`.
+
+ See Also
+ --------
+ max : alias of this function
+ ndarray.max : equivalent method
+ """
+ return _wrapreduction(a, np.maximum, 'max', axis, None, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+
+def _min_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+ where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_min_dispatcher)
+def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return the minimum of an array or minimum along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to operate. By default, flattened input is
+ used.
+
+ If this is a tuple of ints, the minimum is selected over multiple axes,
+ instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ See :ref:`ufuncs-output-type` for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the ``min`` method of sub-classes of
+ `ndarray`; any non-default value will be. If the sub-class's
+ method does not implement `keepdims`, an exception will be
+ raised.
+
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+ computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ where : array_like of bool, optional
+ Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+ for details.
+
+ Returns
+ -------
+ min : ndarray or scalar
+ Minimum of `a`. If `axis` is None, the result is a scalar value.
+ If `axis` is an int, the result is an array of dimension
+ ``a.ndim - 1``. If `axis` is a tuple, the result is an array of
+ dimension ``a.ndim - len(axis)``.
+
+ See Also
+ --------
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+ minimum :
+ Element-wise minimum of two arrays, propagating any NaNs.
+ fmin :
+ Element-wise minimum of two arrays, ignoring any NaNs.
+ argmin :
+ Return the indices of the minimum values.
+
+ nanmax, maximum, fmax
+
+ Notes
+ -----
+ NaN values are propagated; that is, if at least one item is NaN, the
+ corresponding min value will be NaN as well. To ignore NaN values
+ (MATLAB behavior), use `nanmin`.
+
+ Don't use `~numpy.min` for element-wise comparison of two arrays; when
+ ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
+ ``min(a, axis=0)``.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(4).reshape((2,2))
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> np.min(a) # Minimum of the flattened array
+ 0
+ >>> np.min(a, axis=0) # Minima along the first axis
+ array([0, 1])
+ >>> np.min(a, axis=1) # Minima along the second axis
+ array([0, 2])
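+
+ With ``keepdims=True`` the reduced axis is retained as a size-one
+ dimension (shown for the array above):
+
+ >>> np.min(a, axis=1, keepdims=True)
+ array([[0],
+ [2]])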
+ >>> np.min(a, where=[False, True], initial=10, axis=0)
+ array([10, 1])
+
+ >>> b = np.arange(5, dtype=float)
+ >>> b[2] = np.nan
+ >>> np.min(b)
+ np.float64(nan)
+ >>> np.min(b, where=~np.isnan(b), initial=10)
+ 0.0
+ >>> np.nanmin(b)
+ 0.0
+
+ >>> np.min([[-50], [10]], axis=-1, initial=0)
+ array([-50, 0])
+
+ Notice that the initial value is used as one of the elements for which the
+ minimum is determined, unlike the default argument of Python's ``min``
+ function, which is used only for empty iterables. In that sense
+ ``initial`` is not the same as Python's ``default`` argument:
+
+ >>> np.min([6], initial=5)
+ 5
+ >>> min([6], default=5)
+ 6
+ """
+ return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+
+@array_function_dispatch(_min_dispatcher)
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return the minimum of an array or minimum along an axis.
+
+ `amin` is an alias of `~numpy.min`.
+
+ See Also
+ --------
+ min : alias of this function
+ ndarray.min : equivalent method
+ """
+ return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+
+def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
+ """
+ Return the product of array elements over a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a product is performed. The default,
+ axis=None, will calculate the product of all the elements in the
+ input array. If axis is negative it counts from the last to the
+ first axis.
+
+ If axis is a tuple of ints, a product is performed on all of the
+ axes specified in the tuple instead of a single axis or all the
+ axes as before.
+ dtype : dtype, optional
+ The type of the returned array, as well as of the accumulator in
+ which the elements are multiplied. The dtype of `a` is used by
+ default unless `a` has an integer dtype of less precision than the
+ default platform integer. In that case, if `a` is signed then the
+ platform integer is used while if `a` is unsigned then an unsigned
+ integer of the same precision as the platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in the
+ result as dimensions with size one. With this option, the result
+ will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `prod` method of sub-classes of
+ `ndarray`; any non-default value will be. If the sub-class's
+ method does not implement `keepdims`, an exception will be
+ raised.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce`
+ for details.
+ where : array_like of bool, optional
+ Elements to include in the product. See `~numpy.ufunc.reduce`
+ for details.
+
+ Returns
+ -------
+ product_along_axis : ndarray, see `dtype` parameter above.
+ An array shaped as `a` but with the specified axis removed.
+ Returns a reference to `out` if specified.
+
+ See Also
+ --------
+ ndarray.prod : equivalent method
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow. That means that, on a 32-bit platform:
+
+ >>> x = np.array([536870910, 536870910, 536870910, 536870910])
+ >>> np.prod(x)
+ 16 # may vary
+
+ The product of an empty array is the neutral element 1:
+
+ >>> np.prod([])
+ 1.0
+
+ Examples
+ --------
+ By default, calculate the product of all elements:
+
+ >>> import numpy as np
+ >>> np.prod([1.,2.])
+ 2.0
+
+ Even when the input array is two-dimensional:
+
+ >>> a = np.array([[1., 2.], [3., 4.]])
+ >>> np.prod(a)
+ 24.0
+
+ But we can also specify the axis over which to multiply:
+
+ >>> np.prod(a, axis=1)
+ array([ 2., 12.])
+ >>> np.prod(a, axis=0)
+ array([3., 8.])
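+
+ A tuple of ints reduces over several axes at once; here, both axes of the
+ 2-D array above, matching the product of the flattened array:
+
+ >>> np.prod(a, axis=(0, 1))
+ 24.0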
+
+ Or select specific elements to include:
+
+ >>> np.prod([1., np.nan, 3.], where=[True, False, True])
+ 3.0
+
+ If the type of `x` is unsigned, then the output type is
+ the unsigned platform integer:
+
+ >>> x = np.array([1, 2, 3], dtype=np.uint8)
+ >>> np.prod(x).dtype == np.uint
+ True
+
+ If `x` is of a signed integer type, then the output type
+ is the default platform integer:
+
+ >>> x = np.array([1, 2, 3], dtype=np.int8)
+ >>> np.prod(x).dtype == int
+ True
+
+ You can also start the product with a value other than one:
+
+ >>> np.prod([1, 2], initial=5)
+ 10
+ """
+ return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+
+def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumprod_dispatcher)
+def cumprod(a, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative product of elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative product is computed. By default
+ the input is flattened.
+ dtype : dtype, optional
+ Type of the returned array, as well as of the accumulator in which
+ the elements are multiplied. If *dtype* is not specified, it
+ defaults to the dtype of `a`, unless `a` has an integer dtype with
+ a precision less than that of the default platform integer. In
+ that case, the default platform integer is used instead.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type of the resulting values will be cast if necessary.
+
+ Returns
+ -------
+ cumprod : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case a reference to out is returned.
+
+ See Also
+ --------
+ cumulative_prod : Array API compatible alternative for ``cumprod``.
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1,2,3])
+ >>> np.cumprod(a) # intermediate results 1, 1*2
+ ... # total product 1*2*3 = 6
+ array([1, 2, 6])
+ >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> np.cumprod(a, dtype=float) # specify type of output
+ array([ 1., 2., 6., 24., 120., 720.])
+
+ The cumulative product for each column (i.e., over the rows) of `a`:
+
+ >>> np.cumprod(a, axis=0)
+ array([[ 1, 2, 3],
+ [ 4, 10, 18]])
+
+ The cumulative product for each row (i.e. over the columns) of `a`:
+
+ >>> np.cumprod(a,axis=1)
+ array([[ 1, 2, 6],
+ [ 4, 20, 120]])
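+
+ As noted above, integer overflow wraps silently; a small sketch with
+ ``int8``, where ``100 * 100 = 10000`` wraps modulo 256 to 16:
+
+ >>> np.cumprod(np.array([100, 100], dtype=np.int8))
+ array([100, 16], dtype=int8)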
+
+ """
+ return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
+
+
+def _ndim_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_ndim_dispatcher)
+def ndim(a):
+ """
+ Return the number of dimensions of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array. If it is not already an ndarray, a conversion is
+ attempted.
+
+ Returns
+ -------
+ number_of_dimensions : int
+ The number of dimensions in `a`. Scalars are zero-dimensional.
+
+ See Also
+ --------
+ ndarray.ndim : equivalent method
+ shape : dimensions of array
+ ndarray.shape : dimensions of array
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.ndim([[1,2,3],[4,5,6]])
+ 2
+ >>> np.ndim(np.array([[1,2,3],[4,5,6]]))
+ 2
+ >>> np.ndim(1)
+ 0
+
+ """
+ try:
+ return a.ndim
+ except AttributeError:
+ return asarray(a).ndim
+
+
+def _size_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_size_dispatcher)
+def size(a, axis=None):
+ """
+ Return the number of elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which the elements are counted. By default, give
+ the total number of elements.
+
+ Returns
+ -------
+ element_count : int
+ Number of elements along the specified axis.
+
+ See Also
+ --------
+ shape : dimensions of array
+ ndarray.shape : dimensions of array
+ ndarray.size : number of elements in array
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1,2,3],[4,5,6]])
+ >>> np.size(a)
+ 6
+ >>> np.size(a,1)
+ 3
+ >>> np.size(a,0)
+ 2
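+
+ Negative axes count from the last dimension, mirroring ``a.shape[axis]``:
+
+ >>> np.size(a, -1)
+ 3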
+
+ """
+ if axis is None:
+ try:
+ return a.size
+ except AttributeError:
+ return asarray(a).size
+ else:
+ try:
+ return a.shape[axis]
+ except AttributeError:
+ return asarray(a).shape[axis]
+
+
+def _round_dispatcher(a, decimals=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_round_dispatcher)
+def round(a, decimals=0, out=None):
+ """
+ Evenly round to the given number of decimals.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ decimals : int, optional
+ Number of decimal places to round to (default: 0). If
+ decimals is negative, it specifies the number of positions to
+ the left of the decimal point.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary. See :ref:`ufuncs-output-type`
+ for more details.
+
+ Returns
+ -------
+ rounded_array : ndarray
+ An array of the same type as `a`, containing the rounded values.
+ Unless `out` was specified, a new array is created. A reference to
+ the result is returned.
+
+ The real and imaginary parts of complex numbers are rounded
+ separately. The result of rounding a float is a float.
+
+ See Also
+ --------
+ ndarray.round : equivalent method
+ around : an alias for this function
+ ceil, fix, floor, rint, trunc
+
+
+ Notes
+ -----
+ For values exactly halfway between rounded decimal values, NumPy
+ rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
+ -0.5 and 0.5 round to 0.0, etc.
+
+ ``np.round`` uses a fast but sometimes inexact algorithm to round
+ floating-point datatypes. For positive `decimals` it is equivalent to
+ ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
+ error due to the inexact representation of decimal fractions in the IEEE
+ floating point standard [1]_ and errors introduced when scaling by powers
+ of ten. For instance, note the extra "1" in the following:
+
+ >>> np.round(56294995342131.5, 3)
+ 56294995342131.51
+
+ If your goal is to print such values with a fixed number of decimals, it is
+ preferable to use numpy's float printing routines to limit the number of
+ printed decimals:
+
+ >>> np.format_float_positional(56294995342131.5, precision=3)
+ '56294995342131.5'
+
+ The float printing routines use an accurate but much more computationally
+ demanding algorithm to compute the number of digits after the decimal
+ point.
+
+ Alternatively, Python's built-in `round` function uses a more accurate
+ but slower algorithm for 64-bit floating point values:
+
+ >>> round(56294995342131.5, 3)
+ 56294995342131.5
+ >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
+ (16.06, 16.05)
+
+
+ References
+ ----------
+ .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
+ https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.round([0.37, 1.64])
+ array([0., 2.])
+ >>> np.round([0.37, 1.64], decimals=1)
+ array([0.4, 1.6])
+ >>> np.round([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
+ array([0., 2., 2., 4., 4.])
+ >>> np.round([1,2,3,11], decimals=1) # ndarray of ints is returned
+ array([ 1, 2, 3, 11])
+ >>> np.round([1,2,3,11], decimals=-1)
+ array([ 0, 0, 0, 10])
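+
+ The real and imaginary parts of complex input are rounded separately
+ (a small sketch; the exact printed digits may vary with float rounding):
+
+ >>> np.round(np.array([1.25 + 2.35j]), 1)
+ array([1.2+2.4j])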
+
+ """
+ return _wrapfunc(a, 'round', decimals=decimals, out=out)
+
+
+@array_function_dispatch(_round_dispatcher)
+def around(a, decimals=0, out=None):
+ """
+ Round an array to the given number of decimals.
+
+ `around` is an alias of `~numpy.round`.
+
+ See Also
+ --------
+ ndarray.round : equivalent method
+ round : alias for this function
+ ceil, fix, floor, rint, trunc
+
+ """
+ return _wrapfunc(a, 'round', decimals=decimals, out=out)
+
+
+def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *,
+ where=None):
+ return (a, where, out)
+
+
+@array_function_dispatch(_mean_dispatcher)
+def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
+ where=np._NoValue):
+ """
+ Compute the arithmetic mean along the specified axis.
+
+ Returns the average of the array elements. The average is taken over
+ the flattened array by default, otherwise over the specified axis.
+ `float64` intermediate and return values are used for integer inputs.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose mean is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the means are computed. The default is to
+ compute the mean of the flattened array.
+
+ If this is a tuple of ints, a mean is performed over multiple axes,
+ instead of a single axis or all the axes as before.
+ dtype : data-type, optional
+ Type to use in computing the mean. For integer inputs, the default
+ is `float64`; for floating point inputs, it is the same as the
+ input dtype.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+ See :ref:`ufuncs-output-type` for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `mean` method of sub-classes of
+ `ndarray`; any non-default value will be. If the sub-class's
+ method does not implement `keepdims`, an exception will be
+ raised.
+
+ where : array_like of bool, optional
+ Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ m : ndarray, see dtype parameter above
+ If `out=None`, returns a new array containing the mean values,
+ otherwise a reference to the output array is returned.
+
+ See Also
+ --------
+ average : Weighted average
+ std, var, nanmean, nanstd, nanvar
+
+ Notes
+ -----
+ The arithmetic mean is the sum of the elements along the axis divided
+ by the number of elements.
+
+ Note that for floating-point input, the mean is computed using the
+ same precision the input has. Depending on the input data, this can
+ cause the results to be inaccurate, especially for `float32` (see
+ example below). Specifying a higher-precision accumulator using the
+ `dtype` keyword can alleviate this issue.
+
+ By default, `float16` results are computed using `float32` intermediates
+ for extra precision.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.mean(a)
+ 2.5
+ >>> np.mean(a, axis=0)
+ array([2., 3.])
+ >>> np.mean(a, axis=1)
+ array([1.5, 3.5])
+
+ In single precision, `mean` can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.mean(a)
+ np.float32(0.54999924)
+
+ Computing the mean in float64 is more accurate:
+
+ >>> np.mean(a, dtype=np.float64)
+ 0.55000000074505806 # may vary
+
+ The mean can also be computed over ``timedelta64`` arrays:
+
+ >>> b = np.array([1, 3], dtype="timedelta64[D]")
+ >>> np.mean(b)
+ np.timedelta64(2,'D')
+
+ Specifying a where argument:
+
+ >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]])
+ >>> np.mean(a)
+ 12.0
+ >>> np.mean(a, where=[[True], [False], [False]])
+ 9.0
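+
+ With ``keepdims=True`` the result keeps a size-one axis and broadcasts
+ against the input:
+
+ >>> np.mean(a, axis=1, keepdims=True)
+ array([[ 9.],
+ [12.],
+ [15.]])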
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if where is not np._NoValue:
+ kwargs['where'] = where
+ if type(a) is not mu.ndarray:
+ try:
+ mean = a.mean
+ except AttributeError:
+ pass
+ else:
+ return mean(axis=axis, dtype=dtype, out=out, **kwargs)
+
+ return _methods._mean(a, axis=axis, dtype=dtype,
+ out=out, **kwargs)
+
+
+def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None, mean=None, correction=None):
+ return (a, where, out, mean)
+
+
+@array_function_dispatch(_std_dispatcher)
+def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
+ where=np._NoValue, mean=np._NoValue, correction=np._NoValue):
+ r"""
+ Compute the standard deviation along the specified axis.
+
+ Returns the standard deviation, a measure of the spread of a distribution,
+ of the array elements. The standard deviation is computed for the
+ flattened array by default, otherwise over the specified axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Calculate the standard deviation of these values.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the standard deviation is computed. The
+ default is to compute the standard deviation of the flattened array.
+ If this is a tuple of ints, a standard deviation is performed over
+ multiple axes, instead of a single axis or all the axes as before.
+ dtype : dtype, optional
+ Type to use in computing the standard deviation. For arrays of
+ integer type the default is float64, for arrays of float types it is
+ the same as the array type.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type (of the calculated
+ values) will be cast if necessary.
+ See :ref:`ufuncs-output-type` for more details.
+ ddof : {int, float}, optional
+ Means Delta Degrees of Freedom. The divisor used in calculations
+ is ``N - ddof``, where ``N`` represents the number of elements.
+ By default `ddof` is zero. See Notes for details about use of `ddof`.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `std` method of sub-classes of
+ `ndarray`; any non-default value will be. If the sub-class's
+ method does not implement `keepdims`, an exception will be
+ raised.
+ where : array_like of bool, optional
+ Elements to include in the standard deviation.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.20.0
+
+ mean : array_like, optional
+ Provide the mean to prevent its recalculation. The mean should have
+ a shape as if it were calculated with ``keepdims=True``, and it must
+ be computed over the same `axis` that is passed to this call to
+ ``std``.
+
+ .. versionadded:: 2.0.0
+
+ correction : {int, float}, optional
+ Array API compatible name for the ``ddof`` parameter. Only one of
+ the two may be provided at a time.
+
+ .. versionadded:: 2.0.0
+
+ Returns
+ -------
+ standard_deviation : ndarray, see dtype parameter above.
+ If `out` is None, return a new array containing the standard deviation,
+ otherwise return a reference to the output array.
+
+ See Also
+ --------
+ var, mean, nanmean, nanstd, nanvar
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ There are several common variants of the array standard deviation
+ calculation. Assuming the input `a` is a one-dimensional NumPy array
+ and ``mean`` is either provided as an argument or computed as
+ ``a.mean()``, NumPy computes the standard deviation of an array as::
+
+ N = len(a)
+ d2 = abs(a - mean)**2 # abs is for complex `a`
+ var = d2.sum() / (N - ddof) # note use of `ddof`
+ std = var**0.5
+
+ Different values of the argument `ddof` are useful in different
+ contexts. NumPy's default ``ddof=0`` corresponds with the expression:
+
+ .. math::
+
+ \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N}}
+
+ which is sometimes called the "population standard deviation" in the field
+ of statistics because it applies the definition of standard deviation to
+ `a` as if `a` were a complete population of possible observations.
+
+ Many other libraries define the standard deviation of an array
+ differently, e.g.:
+
+ .. math::
+
+ \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N - 1}}
+
+ In statistics, the resulting quantity is sometimes called the "sample
+ standard deviation" because if `a` is a random sample from a larger
+ population, this calculation provides the square root of an unbiased
+ estimate of the variance of the population. The use of :math:`N-1` in the
+ denominator is often called "Bessel's correction" because it corrects for
+ bias (toward lower values) in the variance estimate introduced when the
+ sample mean of `a` is used in place of the true mean of the population.
+ The resulting estimate of the standard deviation is still biased, but less
+ than it would have been without the correction. For this quantity, use
+ ``ddof=1``.
+
+ Note that, for complex numbers, `std` takes the absolute
+ value before squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the standard deviation is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for float32 (see example below).
+ Specifying a higher-accuracy accumulator using the `dtype` keyword can
+ alleviate this issue.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.std(a)
+ 1.1180339887498949 # may vary
+ >>> np.std(a, axis=0)
+ array([1., 1.])
+ >>> np.std(a, axis=1)
+ array([0.5, 0.5])
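+
+ ``ddof=1`` applies Bessel's correction described in the Notes;
+ ``correction=1`` is the array-API spelling of the same thing:
+
+ >>> np.std(a, ddof=1)
+ 1.2909944487358056 # may vary
+ >>> np.std(a, correction=1)
+ 1.2909944487358056 # may vary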
+
+ In single precision, std() can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.std(a)
+ np.float32(0.45000005)
+
+ Computing the standard deviation in float64 is more accurate:
+
+ >>> np.std(a, dtype=np.float64)
+ 0.44999999925494177 # may vary
+
+ Specifying a where argument:
+
+ >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
+ >>> np.std(a)
+ 2.614064523559687 # may vary
+ >>> np.std(a, where=[[True], [True], [False]])
+ 2.0
+
+ Using the mean keyword to save computation time:
+
+ >>> import numpy as np
+ >>> from timeit import timeit
+ >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
+ >>> mean = np.mean(a, axis=1, keepdims=True)
+ >>>
+ >>> g = globals()
+ >>> n = 10000
+ >>> t1 = timeit("std = np.std(a, axis=1, mean=mean)", globals=g, number=n)
+ >>> t2 = timeit("std = np.std(a, axis=1)", globals=g, number=n)
+ >>> print(f'Percentage execution time saved {100*(t2-t1)/t2:.0f}%')
+ #doctest: +SKIP
+ Percentage execution time saved 30%
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if where is not np._NoValue:
+ kwargs['where'] = where
+ if mean is not np._NoValue:
+ kwargs['mean'] = mean
+
+ if correction is not np._NoValue:
+ if ddof != 0:
+ raise ValueError(
+ "ddof and correction can't be provided simultaneously."
+ )
+ else:
+ ddof = correction
+
+ if type(a) is not mu.ndarray:
+ try:
+ std = a.std
+ except AttributeError:
+ pass
+ else:
+ return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+ return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ **kwargs)
+
+
+def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None, mean=None, correction=None):
+ return (a, where, out, mean)
+
+
+@array_function_dispatch(_var_dispatcher)
+def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
+ where=np._NoValue, mean=np._NoValue, correction=np._NoValue):
+ r"""
+ Compute the variance along the specified axis.
+
+ Returns the variance of the array elements, a measure of the spread of a
+ distribution. The variance is computed for the flattened array by
+ default, otherwise over the specified axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose variance is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the variance is computed. The default is to
+ compute the variance of the flattened array.
+ If this is a tuple of ints, a variance is performed over multiple axes,
+ instead of a single axis or all the axes as before.
+ dtype : data-type, optional
+ Type to use in computing the variance. For arrays of integer type
+ the default is `float64`; for arrays of float types it is the same as
+ the array type.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output, but the type is cast if
+ necessary.
+ ddof : {int, float}, optional
+ "Delta Degrees of Freedom": the divisor used in the calculation is
+ ``N - ddof``, where ``N`` represents the number of elements. By
+ default `ddof` is zero. See notes for details about use of `ddof`.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `var` method of sub-classes of
+ `ndarray`; any non-default value will be. If the sub-class's
+ method does not implement `keepdims`, an exception will be
+ raised.
+ where : array_like of bool, optional
+ Elements to include in the variance. See `~numpy.ufunc.reduce` for
+ details.
+
+ .. versionadded:: 1.20.0
+
+ mean : array_like, optional
+ Provide the mean to prevent its recalculation. The mean should have
+ a shape as if it were calculated with ``keepdims=True``, and it must
+ be computed over the same `axis` that is passed to this call to
+ ``var``.
+
+ .. versionadded:: 2.0.0
+
+ correction : {int, float}, optional
+ Array API compatible name for the ``ddof`` parameter. Only one of
+ the two may be provided at a time.
+
+ .. versionadded:: 2.0.0
+
+ Returns
+ -------
+ variance : ndarray, see dtype parameter above
+ If ``out=None``, returns a new array containing the variance;
+ otherwise, a reference to the output array is returned.
+
+ See Also
+ --------
+ std, mean, nanmean, nanstd, nanvar
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ There are several common variants of the array variance calculation.
+ Assuming the input `a` is a one-dimensional NumPy array and ``mean`` is
+ either provided as an argument or computed as ``a.mean()``, NumPy
+ computes the variance of an array as::
+
+ N = len(a)
+ d2 = abs(a - mean)**2 # abs is for complex `a`
+ var = d2.sum() / (N - ddof) # note use of `ddof`
+
+ Different values of the argument `ddof` are useful in different
+ contexts. NumPy's default ``ddof=0`` corresponds with the expression:
+
+ .. math::
+
+ \frac{\sum_i{|a_i - \bar{a}|^2 }}{N}
+
+ which is sometimes called the "population variance" in the field of
+ statistics because it applies the definition of variance to `a` as if `a`
+ were a complete population of possible observations.
+
+ Many other libraries define the variance of an array differently, e.g.:
+
+ .. math::
+
+ \frac{\sum_i{|a_i - \bar{a}|^2}}{N - 1}
+
+ In statistics, the resulting quantity is sometimes called the "sample
+ variance" because if `a` is a random sample from a larger population,
+ this calculation provides an unbiased estimate of the variance of the
+ population. The use of :math:`N-1` in the denominator is often called
+ "Bessel's correction" because it corrects for bias (toward lower values)
+ in the variance estimate introduced when the sample mean of `a` is used
+ in place of the true mean of the population. For this quantity, use
+ ``ddof=1``.
+
+ Note that for complex numbers, the absolute value is taken before
+ squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the variance is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for `float32` (see example
+ below). Specifying a higher-accuracy accumulator using the ``dtype``
+ keyword can alleviate this issue.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.var(a)
+ 1.25
+ >>> np.var(a, axis=0)
+ array([1., 1.])
+ >>> np.var(a, axis=1)
+ array([0.25, 0.25])
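+
+ ``ddof=1`` gives the unbiased sample variance discussed in the Notes
+ (``correction=1`` is equivalent):
+
+ >>> np.var(a, ddof=1)
+ 1.6666666666666667 # may vary
+ >>> np.var(a, correction=1)
+ 1.6666666666666667 # may vary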
+
+ In single precision, var() can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.var(a)
+ np.float32(0.20250003)
+
+ Computing the variance in float64 is more accurate:
+
+ >>> np.var(a, dtype=np.float64)
+ 0.20249999932944759 # may vary
+ >>> ((1-0.55)**2 + (0.1-0.55)**2)/2
+ 0.2025
+
+ Specifying a where argument:
+
+ >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
+ >>> np.var(a)
+ 6.833333333333333 # may vary
+ >>> np.var(a, where=[[True], [True], [False]])
+ 4.0
+
+ Using the mean keyword to save computation time:
+
+ >>> import numpy as np
+ >>> from timeit import timeit
+ >>>
+ >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
+ >>> mean = np.mean(a, axis=1, keepdims=True)
+ >>>
+ >>> g = globals()
+ >>> n = 10000
+ >>> t1 = timeit("var = np.var(a, axis=1, mean=mean)", globals=g, number=n)
+ >>> t2 = timeit("var = np.var(a, axis=1)", globals=g, number=n)
+ >>> print(f'Percentage execution time saved {100*(t2-t1)/t2:.0f}%')
+ #doctest: +SKIP
+ Percentage execution time saved 32%
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if where is not np._NoValue:
+ kwargs['where'] = where
+ if mean is not np._NoValue:
+ kwargs['mean'] = mean
+
+ if correction is not np._NoValue:
+ if ddof != 0:
+ raise ValueError(
+ "ddof and correction can't be provided simultaneously."
+ )
+ else:
+ ddof = correction
+
+ if type(a) is not mu.ndarray:
+ try:
+ var = a.var
+ except AttributeError:
+ pass
+ else:
+ return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+ return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ **kwargs)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.pyi
new file mode 100644
index 00000000..f0f83093
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/fromnumeric.pyi
@@ -0,0 +1,1750 @@
+# ruff: noqa: ANN401
+from collections.abc import Sequence
+from typing import (
+ Any,
+ Literal,
+ Never,
+ Protocol,
+ SupportsIndex,
+ TypeAlias,
+ TypeVar,
+ overload,
+ type_check_only,
+)
+
+from _typeshed import Incomplete
+from typing_extensions import deprecated
+
+import numpy as np
+from numpy import (
+ _AnyShapeT,
+ _CastingKind,
+ _ModeKind,
+ _OrderACF,
+ _OrderKACF,
+ _PartitionKind,
+ _SortKind,
+ _SortSide,
+ complexfloating,
+ float16,
+ floating,
+ generic,
+ int64,
+ int_,
+ intp,
+ object_,
+ timedelta64,
+ uint64,
+)
+from numpy._globals import _NoValueType
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NDArray,
+ _AnyShape,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt,
+ _ArrayLikeInt_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeUInt_co,
+ _BoolLike_co,
+ _ComplexLike_co,
+ _DTypeLike,
+ _IntLike_co,
+ _NestedSequence,
+ _NumberLike_co,
+ _ScalarLike_co,
+ _ShapeLike,
+)
+
+__all__ = [
+ "all",
+ "amax",
+ "amin",
+ "any",
+ "argmax",
+ "argmin",
+ "argpartition",
+ "argsort",
+ "around",
+ "choose",
+ "clip",
+ "compress",
+ "cumprod",
+ "cumsum",
+ "cumulative_prod",
+ "cumulative_sum",
+ "diagonal",
+ "mean",
+ "max",
+ "min",
+ "matrix_transpose",
+ "ndim",
+ "nonzero",
+ "partition",
+ "prod",
+ "ptp",
+ "put",
+ "ravel",
+ "repeat",
+ "reshape",
+ "resize",
+ "round",
+ "searchsorted",
+ "shape",
+ "size",
+ "sort",
+ "squeeze",
+ "std",
+ "sum",
+ "swapaxes",
+ "take",
+ "trace",
+ "transpose",
+ "var",
+]
+
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_)
+_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any])
+_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])
+_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True)
+_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool])
+
+@type_check_only
+class _SupportsShape(Protocol[_ShapeT_co]):
+ # NOTE: it matters that `self` is positional only
+ @property
+ def shape(self, /) -> _ShapeT_co: ...
+
+# a "sequence" that isn't a string, bytes, bytearray, or memoryview
+_T = TypeVar("_T")
+_PyArray: TypeAlias = list[_T] | tuple[_T, ...]
+# `int` also covers `bool`
+_PyScalar: TypeAlias = complex | bytes | str
+
+@overload
+def take(
+ a: _ArrayLike[_ScalarT],
+ indices: _IntLike_co,
+ axis: None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> _ScalarT: ...
+@overload
+def take(
+ a: ArrayLike,
+ indices: _IntLike_co,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> Any: ...
+@overload
+def take(
+ a: _ArrayLike[_ScalarT],
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def take(
+ a: ArrayLike,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> NDArray[Any]: ...
+@overload
+def take(
+ a: ArrayLike,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex | None,
+ out: _ArrayT,
+ mode: _ModeKind = ...,
+) -> _ArrayT: ...
+@overload
+def take(
+ a: ArrayLike,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+ *,
+ out: _ArrayT,
+ mode: _ModeKind = ...,
+) -> _ArrayT: ...
+
+@overload
+def reshape( # shape: index
+ a: _ArrayLike[_ScalarT],
+ /,
+ shape: SupportsIndex,
+ order: _OrderACF = "C",
+ *,
+ copy: bool | None = None,
+) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ...
+@overload
+def reshape( # shape: (int, ...) @ _AnyShapeT
+ a: _ArrayLike[_ScalarT],
+ /,
+ shape: _AnyShapeT,
+ order: _OrderACF = "C",
+ *,
+ copy: bool | None = None,
+) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ...
+@overload # shape: Sequence[index]
+def reshape(
+ a: _ArrayLike[_ScalarT],
+ /,
+ shape: Sequence[SupportsIndex],
+ order: _OrderACF = "C",
+ *,
+ copy: bool | None = None,
+) -> NDArray[_ScalarT]: ...
+@overload # shape: index
+def reshape(
+ a: ArrayLike,
+ /,
+ shape: SupportsIndex,
+ order: _OrderACF = "C",
+ *,
+ copy: bool | None = None,
+) -> np.ndarray[tuple[int], np.dtype]: ...
+@overload
+def reshape( # shape: (int, ...) @ _AnyShapeT
+ a: ArrayLike,
+ /,
+ shape: _AnyShapeT,
+ order: _OrderACF = "C",
+ *,
+ copy: bool | None = None,
+) -> np.ndarray[_AnyShapeT, np.dtype]: ...
+@overload # shape: Sequence[index]
+def reshape(
+ a: ArrayLike,
+ /,
+ shape: Sequence[SupportsIndex],
+ order: _OrderACF = "C",
+ *,
+ copy: bool | None = None,
+) -> NDArray[Any]: ...
+@overload
+@deprecated(
+ "`newshape` keyword argument is deprecated, "
+ "use `shape=...` or pass shape positionally instead. "
+ "(deprecated in NumPy 2.1)",
+)
+def reshape(
+ a: ArrayLike,
+ /,
+ shape: None = None,
+ order: _OrderACF = "C",
+ *,
+ newshape: _ShapeLike,
+ copy: bool | None = None,
+) -> NDArray[Any]: ...
+
+@overload
+def choose(
+ a: _IntLike_co,
+ choices: ArrayLike,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> Any: ...
+@overload
+def choose(
+ a: _ArrayLikeInt_co,
+ choices: _ArrayLike[_ScalarT],
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def choose(
+ a: _ArrayLikeInt_co,
+ choices: ArrayLike,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> NDArray[Any]: ...
+@overload
+def choose(
+ a: _ArrayLikeInt_co,
+ choices: ArrayLike,
+ out: _ArrayT,
+ mode: _ModeKind = ...,
+) -> _ArrayT: ...
+
+@overload
+def repeat(
+ a: _ArrayLike[_ScalarT],
+ repeats: _ArrayLikeInt_co,
+ axis: None = None,
+) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ...
+@overload
+def repeat(
+ a: _ArrayLike[_ScalarT],
+ repeats: _ArrayLikeInt_co,
+ axis: SupportsIndex,
+) -> NDArray[_ScalarT]: ...
+@overload
+def repeat(
+ a: ArrayLike,
+ repeats: _ArrayLikeInt_co,
+ axis: None = None,
+) -> np.ndarray[tuple[int], np.dtype[Any]]: ...
+@overload
+def repeat(
+ a: ArrayLike,
+ repeats: _ArrayLikeInt_co,
+ axis: SupportsIndex,
+) -> NDArray[Any]: ...
+
+def put(
+ a: NDArray[Any],
+ ind: _ArrayLikeInt_co,
+ v: ArrayLike,
+ mode: _ModeKind = ...,
+) -> None: ...
+
+@overload
+def swapaxes(
+ a: _ArrayLike[_ScalarT],
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
+) -> NDArray[_ScalarT]: ...
+@overload
+def swapaxes(
+ a: ArrayLike,
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
+) -> NDArray[Any]: ...
+
+@overload
+def transpose(
+ a: _ArrayLike[_ScalarT],
+ axes: _ShapeLike | None = ...
+) -> NDArray[_ScalarT]: ...
+@overload
+def transpose(
+ a: ArrayLike,
+ axes: _ShapeLike | None = ...
+) -> NDArray[Any]: ...
+
+@overload
+def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ...
+@overload
+def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ...
+
+#
+@overload
+def partition(
+ a: _ArrayLike[_ScalarT],
+ kth: _ArrayLikeInt,
+ axis: SupportsIndex | None = -1,
+ kind: _PartitionKind = "introselect",
+ order: None = None,
+) -> NDArray[_ScalarT]: ...
+@overload
+def partition(
+ a: _ArrayLike[np.void],
+ kth: _ArrayLikeInt,
+ axis: SupportsIndex | None = -1,
+ kind: _PartitionKind = "introselect",
+ order: str | Sequence[str] | None = None,
+) -> NDArray[np.void]: ...
+@overload
+def partition(
+ a: ArrayLike,
+ kth: _ArrayLikeInt,
+ axis: SupportsIndex | None = -1,
+ kind: _PartitionKind = "introselect",
+ order: str | Sequence[str] | None = None,
+) -> NDArray[Any]: ...
+
+#
+def argpartition(
+ a: ArrayLike,
+ kth: _ArrayLikeInt,
+ axis: SupportsIndex | None = -1,
+ kind: _PartitionKind = "introselect",
+ order: str | Sequence[str] | None = None,
+) -> NDArray[intp]: ...
+
+#
+@overload
+def sort(
+ a: _ArrayLike[_ScalarT],
+ axis: SupportsIndex | None = ...,
+ kind: _SortKind | None = ...,
+ order: str | Sequence[str] | None = ...,
+ *,
+ stable: bool | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def sort(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ kind: _SortKind | None = ...,
+ order: str | Sequence[str] | None = ...,
+ *,
+ stable: bool | None = ...,
+) -> NDArray[Any]: ...
+
+def argsort(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ kind: _SortKind | None = ...,
+ order: str | Sequence[str] | None = ...,
+ *,
+ stable: bool | None = ...,
+) -> NDArray[intp]: ...
+
+@overload
+def argmax(
+ a: ArrayLike,
+ axis: None = ...,
+ out: None = ...,
+ *,
+ keepdims: Literal[False] = ...,
+) -> intp: ...
+@overload
+def argmax(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ *,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def argmax(
+ a: ArrayLike,
+ axis: SupportsIndex | None,
+ out: _BoolOrIntArrayT,
+ *,
+ keepdims: bool = ...,
+) -> _BoolOrIntArrayT: ...
+@overload
+def argmax(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ *,
+ out: _BoolOrIntArrayT,
+ keepdims: bool = ...,
+) -> _BoolOrIntArrayT: ...
+
+@overload
+def argmin(
+ a: ArrayLike,
+ axis: None = ...,
+ out: None = ...,
+ *,
+ keepdims: Literal[False] = ...,
+) -> intp: ...
+@overload
+def argmin(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+ *,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def argmin(
+ a: ArrayLike,
+ axis: SupportsIndex | None,
+ out: _BoolOrIntArrayT,
+ *,
+ keepdims: bool = ...,
+) -> _BoolOrIntArrayT: ...
+@overload
+def argmin(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ *,
+ out: _BoolOrIntArrayT,
+ keepdims: bool = ...,
+) -> _BoolOrIntArrayT: ...
+
+@overload
+def searchsorted(
+ a: ArrayLike,
+ v: _ScalarLike_co,
+ side: _SortSide = ...,
+ sorter: _ArrayLikeInt_co | None = ..., # 1D int array
+) -> intp: ...
+@overload
+def searchsorted(
+ a: ArrayLike,
+ v: ArrayLike,
+ side: _SortSide = ...,
+ sorter: _ArrayLikeInt_co | None = ..., # 1D int array
+) -> NDArray[intp]: ...
+
+#
+@overload
+def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ...
+@overload
+def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ...
+@overload
+def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ...
+@overload
+def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ...
+@overload
+def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ...
+@overload
+def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ...
+
+@overload
+def squeeze(
+ a: _ScalarT,
+ axis: _ShapeLike | None = ...,
+) -> _ScalarT: ...
+@overload
+def squeeze(
+ a: _ArrayLike[_ScalarT],
+ axis: _ShapeLike | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def squeeze(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def diagonal(
+ a: _ArrayLike[_ScalarT],
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ..., # >= 2D array
+) -> NDArray[_ScalarT]: ...
+@overload
+def diagonal(
+ a: ArrayLike,
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ..., # >= 2D array
+) -> NDArray[Any]: ...
+
+@overload
+def trace(
+ a: ArrayLike, # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+) -> Any: ...
+@overload
+def trace(
+ a: ArrayLike, # >= 2D array
+ offset: SupportsIndex,
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
+ dtype: DTypeLike,
+ out: _ArrayT,
+) -> _ArrayT: ...
+@overload
+def trace(
+ a: ArrayLike, # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ *,
+ out: _ArrayT,
+) -> _ArrayT: ...
+
+_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]]
+
+@overload
+def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ...
+@overload
+def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ...
+@overload
+def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np.str_]: ...
+@overload
+def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ...
+@overload
+def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ...
+@overload
+def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ...
+@overload
+def ravel(
+ a: complex | _NestedSequence[complex],
+ order: _OrderKACF = "C",
+) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ...
+@overload
+def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ...
+
+def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ...
+
+# this prevents `Any` from being returned with Pyright
+@overload
+def shape(a: _SupportsShape[Never]) -> _AnyShape: ...
+@overload
+def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ...
+@overload
+def shape(a: _PyScalar) -> tuple[()]: ...
+# `collections.abc.Sequence` can't be used here, since `bytes` and `str` are
+# subtypes of it, which would make the return types incompatible.
+@overload
+def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ...
+@overload
+def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ...
+# this overload will be skipped by typecheckers that don't support PEP 688
+@overload
+def shape(a: memoryview | bytearray) -> tuple[int]: ...
+@overload
+def shape(a: ArrayLike) -> _AnyShape: ...
+
+@overload
+def compress(
+ condition: _ArrayLikeBool_co, # 1D bool array
+ a: _ArrayLike[_ScalarT],
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def compress(
+ condition: _ArrayLikeBool_co, # 1D bool array
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def compress(
+ condition: _ArrayLikeBool_co, # 1D bool array
+ a: ArrayLike,
+ axis: SupportsIndex | None,
+ out: _ArrayT,
+) -> _ArrayT: ...
+@overload
+def compress(
+ condition: _ArrayLikeBool_co, # 1D bool array
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ *,
+ out: _ArrayT,
+) -> _ArrayT: ...
+
+@overload
+def clip(
+ a: _ScalarT,
+ a_min: ArrayLike | None,
+ a_max: ArrayLike | None,
+ out: None = ...,
+ *,
+ min: ArrayLike | None = ...,
+ max: ArrayLike | None = ...,
+ dtype: None = ...,
+ where: _ArrayLikeBool_co | None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[str | None, ...] = ...,
+ casting: _CastingKind = ...,
+) -> _ScalarT: ...
+@overload
+def clip(
+ a: _ScalarLike_co,
+ a_min: ArrayLike | None,
+ a_max: ArrayLike | None,
+ out: None = ...,
+ *,
+ min: ArrayLike | None = ...,
+ max: ArrayLike | None = ...,
+ dtype: None = ...,
+ where: _ArrayLikeBool_co | None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[str | None, ...] = ...,
+ casting: _CastingKind = ...,
+) -> Any: ...
+@overload
+def clip(
+ a: _ArrayLike[_ScalarT],
+ a_min: ArrayLike | None,
+ a_max: ArrayLike | None,
+ out: None = ...,
+ *,
+ min: ArrayLike | None = ...,
+ max: ArrayLike | None = ...,
+ dtype: None = ...,
+ where: _ArrayLikeBool_co | None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[str | None, ...] = ...,
+ casting: _CastingKind = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def clip(
+ a: ArrayLike,
+ a_min: ArrayLike | None,
+ a_max: ArrayLike | None,
+ out: None = ...,
+ *,
+ min: ArrayLike | None = ...,
+ max: ArrayLike | None = ...,
+ dtype: None = ...,
+ where: _ArrayLikeBool_co | None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[str | None, ...] = ...,
+ casting: _CastingKind = ...,
+) -> NDArray[Any]: ...
+@overload
+def clip(
+ a: ArrayLike,
+ a_min: ArrayLike | None,
+ a_max: ArrayLike | None,
+ out: _ArrayT,
+ *,
+ min: ArrayLike | None = ...,
+ max: ArrayLike | None = ...,
+ dtype: DTypeLike = ...,
+ where: _ArrayLikeBool_co | None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[str | None, ...] = ...,
+ casting: _CastingKind = ...,
+) -> _ArrayT: ...
+@overload
+def clip(
+ a: ArrayLike,
+ a_min: ArrayLike | None,
+ a_max: ArrayLike | None,
+ out: ArrayLike = ...,
+ *,
+ min: ArrayLike | None = ...,
+ max: ArrayLike | None = ...,
+ dtype: DTypeLike,
+ where: _ArrayLikeBool_co | None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[str | None, ...] = ...,
+ casting: _CastingKind = ...,
+) -> Any: ...
+
+@overload
+def sum(
+ a: _ArrayLike[_ScalarT],
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT: ...
+@overload
+def sum(
+ a: _ArrayLike[_ScalarT],
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT | NDArray[_ScalarT]: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: _ShapeLike | None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT | NDArray[_ScalarT]: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT | NDArray[_ScalarT]: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike,
+ out: _ArrayT,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayT: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike = ...,
+ *,
+ out: _ArrayT,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayT: ...
+
+# keep in sync with `any`
+@overload
+def all(
+ a: ArrayLike | None,
+ axis: None = None,
+ out: None = None,
+ keepdims: Literal[False, 0] | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> np.bool: ...
+@overload
+def all(
+ a: ArrayLike | None,
+ axis: int | tuple[int, ...] | None = None,
+ out: None = None,
+ keepdims: _BoolLike_co | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> Incomplete: ...
+@overload
+def all(
+ a: ArrayLike | None,
+ axis: int | tuple[int, ...] | None,
+ out: _ArrayT,
+ keepdims: _BoolLike_co | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ArrayT: ...
+@overload
+def all(
+ a: ArrayLike | None,
+ axis: int | tuple[int, ...] | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: _BoolLike_co | _NoValueType = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ArrayT: ...
+
+# keep in sync with `all`
+@overload
+def any(
+ a: ArrayLike | None,
+ axis: None = None,
+ out: None = None,
+ keepdims: Literal[False, 0] | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> np.bool: ...
+@overload
+def any(
+ a: ArrayLike | None,
+ axis: int | tuple[int, ...] | None = None,
+ out: None = None,
+ keepdims: _BoolLike_co | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> Incomplete: ...
+@overload
+def any(
+ a: ArrayLike | None,
+ axis: int | tuple[int, ...] | None,
+ out: _ArrayT,
+ keepdims: _BoolLike_co | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ArrayT: ...
+@overload
+def any(
+ a: ArrayLike | None,
+ axis: int | tuple[int, ...] | None = None,
+ *,
+ out: _ArrayT,
+ keepdims: _BoolLike_co | _NoValueType = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ArrayT: ...
+
+#
+@overload
+def cumsum(
+ a: _ArrayLike[_ScalarT],
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: SupportsIndex | None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: SupportsIndex | None,
+ dtype: DTypeLike,
+ out: _ArrayT,
+) -> _ArrayT: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: SupportsIndex | None = ...,
+ dtype: DTypeLike = ...,
+ *,
+ out: _ArrayT,
+) -> _ArrayT: ...
+
+@overload
+def cumulative_sum(
+ x: _ArrayLike[_ScalarT],
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def cumulative_sum(
+ x: ArrayLike,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumulative_sum(
+ x: ArrayLike,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def cumulative_sum(
+ x: ArrayLike,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumulative_sum(
+ x: ArrayLike,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayT,
+ include_initial: bool = ...,
+) -> _ArrayT: ...
+
+@overload
+def ptp(
+ a: _ArrayLike[_ScalarT],
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+) -> _ScalarT: ...
+@overload
+def ptp(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def ptp(
+ a: ArrayLike,
+ axis: _ShapeLike | None,
+ out: _ArrayT,
+ keepdims: bool = ...,
+) -> _ArrayT: ...
+@overload
+def ptp(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ *,
+ out: _ArrayT,
+ keepdims: bool = ...,
+) -> _ArrayT: ...
+
+@overload
+def amax(
+ a: _ArrayLike[_ScalarT],
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT: ...
+@overload
+def amax(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def amax(
+ a: ArrayLike,
+ axis: _ShapeLike | None,
+ out: _ArrayT,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayT: ...
+@overload
+def amax(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ *,
+ out: _ArrayT,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayT: ...
+
+@overload
+def amin(
+ a: _ArrayLike[_ScalarT],
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT: ...
+@overload
+def amin(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def amin(
+ a: ArrayLike,
+ axis: _ShapeLike | None,
+ out: _ArrayT,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayT: ...
+@overload
+def amin(
+ a: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ *,
+ out: _ArrayT,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayT: ...
+
+# TODO: `np.prod()`: For object arrays `initial` does not necessarily
+# have to be a numerical scalar.
+# The only requirement is that it is compatible
+# with the `.__mul__()` method(s) of the passed array's elements.
+
+# Note that the same situation holds for all wrappers around
+# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
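+#
+# Illustrative runtime example (currently rejected by the annotations below,
+# which is exactly what this TODO is about):
+#
+#     >>> from fractions import Fraction
+#     >>> a = np.array([Fraction(1, 2), Fraction(2, 3)], dtype=object)
+#     >>> np.prod(a, initial=Fraction(3))
+#     Fraction(1, 1)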
+@overload
+def prod(
+ a: _ArrayLikeBool_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> int_: ...
+@overload
+def prod(
+ a: _ArrayLikeUInt_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> uint64: ...
+@overload
+def prod(
+ a: _ArrayLikeInt_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> int64: ...
+@overload
+def prod(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> floating: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> complexfloating: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ScalarT: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike | None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike | None,
+ out: _ArrayT,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayT: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike | None = ...,
+ *,
+ out: _ArrayT,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayT: ...
+
+@overload
+def cumprod(
+ a: _ArrayLikeBool_co,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[int_]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeUInt_co,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[uint64]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[int64]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeFloat_co,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[floating]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[complexfloating]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeObject_co,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: SupportsIndex | None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: SupportsIndex | None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: SupportsIndex | None = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: SupportsIndex | None,
+ dtype: DTypeLike,
+ out: _ArrayT,
+) -> _ArrayT: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: SupportsIndex | None = ...,
+ dtype: DTypeLike = ...,
+ *,
+ out: _ArrayT,
+) -> _ArrayT: ...
+
+@overload
+def cumulative_prod(
+ x: _ArrayLikeBool_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[int_]: ...
+@overload
+def cumulative_prod(
+ x: _ArrayLikeUInt_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[uint64]: ...
+@overload
+def cumulative_prod(
+ x: _ArrayLikeInt_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[int64]: ...
+@overload
+def cumulative_prod(
+ x: _ArrayLikeFloat_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[floating]: ...
+@overload
+def cumulative_prod(
+ x: _ArrayLikeComplex_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[complexfloating]: ...
+@overload
+def cumulative_prod(
+ x: _ArrayLikeObject_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[object_]: ...
+@overload
+def cumulative_prod(
+ x: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def cumulative_prod(
+ x: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ include_initial: bool = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumulative_prod(
+ x: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ /,
+ *,
+ axis: SupportsIndex | None = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayT,
+ include_initial: bool = ...,
+) -> _ArrayT: ...
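+
+# Illustrative note: as with `cumulative_sum`, `include_initial=True`
+# prepends the multiplicative identity, e.g.
+# `np.cumulative_prod([2, 3, 4], include_initial=True)` gives
+# `array([1, 2, 6, 24])`.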
+
+def ndim(a: ArrayLike) -> int: ...
+
+def size(a: ArrayLike, axis: int | None = ...) -> int: ...
+
+@overload
+def around(
+ a: _BoolLike_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> float16: ...
+@overload
+def around(
+ a: _NumberOrObjectT,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> _NumberOrObjectT: ...
+@overload
+def around(
+ a: _ComplexLike_co | object_,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> Any: ...
+@overload
+def around(
+ a: _ArrayLikeBool_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[float16]: ...
+@overload
+def around(
+ a: _ArrayLike[_NumberOrObjectT],
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[_NumberOrObjectT]: ...
+@overload
+def around(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def around(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ decimals: SupportsIndex,
+ out: _ArrayT,
+) -> _ArrayT: ...
+@overload
+def around(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ decimals: SupportsIndex = ...,
+ *,
+ out: _ArrayT,
+) -> _ArrayT: ...
+
+@overload
+def mean(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> floating: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> complexfloating: ...
+@overload
+def mean(
+ a: _ArrayLike[np.timedelta64],
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> timedelta64: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike,
+ out: _ArrayT,
+ keepdims: bool | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ArrayT: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike | None = ...,
+ *,
+ out: _ArrayT,
+ keepdims: bool | _NoValueType = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ArrayT: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: Literal[False] | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ScalarT: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: Literal[False] | _NoValueType = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ScalarT: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None,
+ keepdims: Literal[True, 1],
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ *,
+ keepdims: bool | _NoValueType = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ScalarT | NDArray[_ScalarT]: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ keepdims: bool | _NoValueType = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> _ScalarT | NDArray[_ScalarT]: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike | None = ...,
+ out: None = ...,
+ keepdims: bool | _NoValueType = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+) -> Incomplete: ...
+
+@overload
+def std(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> floating: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> Any: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> _ScalarT: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> _ScalarT: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> Any: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike,
+ out: _ArrayT,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> _ArrayT: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike = ...,
+ *,
+ out: _ArrayT,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> _ArrayT: ...
+
+@overload
+def var(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> floating: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> Any: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> _ScalarT: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> _ScalarT: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> Any: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None,
+ dtype: DTypeLike,
+ out: _ArrayT,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> _ArrayT: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ dtype: DTypeLike = ...,
+ *,
+ out: _ArrayT,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ where: _ArrayLikeBool_co | _NoValueType = ...,
+ mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,
+ correction: float | _NoValueType = ...,
+) -> _ArrayT: ...
+
+max = amax
+min = amin
+round = around
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/function_base.py b/.venv/lib/python3.12/site-packages/numpy/_core/function_base.py
new file mode 100644
index 00000000..12ab2a7e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/function_base.py
@@ -0,0 +1,545 @@
+import functools
+import operator
+import types
+import warnings
+
+import numpy as np
+from numpy._core import overrides
+from numpy._core._multiarray_umath import _array_converter
+from numpy._core.multiarray import add_docstring
+
+from . import numeric as _nx
+from .numeric import asanyarray, nan, ndim, result_type
+
+__all__ = ['logspace', 'linspace', 'geomspace']
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
+ dtype=None, axis=None, *, device=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_linspace_dispatcher)
+def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
+ axis=0, *, device=None):
+ """
+ Return evenly spaced numbers over a specified interval.
+
+ Returns `num` evenly spaced samples, calculated over the
+ interval [`start`, `stop`].
+
+ The endpoint of the interval can optionally be excluded.
+
+ .. versionchanged:: 1.20.0
+ Values are rounded towards ``-inf`` instead of ``0`` when an
+ integer ``dtype`` is specified. The old behavior can
+ still be obtained with ``np.linspace(start, stop, num).astype(int)``
+
+ Parameters
+ ----------
+ start : array_like
+ The starting value of the sequence.
+ stop : array_like
+ The end value of the sequence, unless `endpoint` is set to False.
+ In that case, the sequence consists of all but the last of ``num + 1``
+ evenly spaced samples, so that `stop` is excluded. Note that the step
+ size changes when `endpoint` is False.
+ num : int, optional
+ Number of samples to generate. Default is 50. Must be non-negative.
+ endpoint : bool, optional
+ If True, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ retstep : bool, optional
+ If True, return (`samples`, `step`), where `step` is the spacing
+ between samples.
+ dtype : dtype, optional
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred dtype will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+ device : str, optional
+ The device on which to place the created array. Default: None.
+ For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+ .. versionadded:: 2.0.0
+
+ Returns
+ -------
+ samples : ndarray
+ There are `num` equally spaced samples in the closed interval
+ ``[start, stop]`` or the half-open interval ``[start, stop)``
+ (depending on whether `endpoint` is True or False).
+ step : float, optional
+ Only returned if `retstep` is True.
+
+ Size of spacing between samples.
+
+
+ See Also
+ --------
+ arange : Similar to `linspace`, but uses a step size (instead of the
+ number of samples).
+ geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
+ scale (a geometric progression).
+ logspace : Similar to `geomspace`, but with the end points specified as
+ logarithms.
+ :ref:`how-to-partition`
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.linspace(2.0, 3.0, num=5)
+ array([2. , 2.25, 2.5 , 2.75, 3. ])
+ >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
+ array([2. , 2.2, 2.4, 2.6, 2.8])
+ >>> np.linspace(2.0, 3.0, num=5, retstep=True)
+ (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
+
+ Graphical illustration:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 8
+ >>> y = np.zeros(N)
+ >>> x1 = np.linspace(0, 10, N, endpoint=True)
+ >>> x2 = np.linspace(0, 10, N, endpoint=False)
+ >>> plt.plot(x1, y, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.plot(x2, y + 0.5, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.ylim([-0.5, 1])
+ (-0.5, 1)
+ >>> plt.show()
+
+ """
+ num = operator.index(num)
+ if num < 0:
+ raise ValueError(
+ f"Number of samples, {num}, must be non-negative."
+ )
+ div = (num - 1) if endpoint else num
+
+ conv = _array_converter(start, stop)
+ start, stop = conv.as_arrays()
+ dt = conv.result_type(ensure_inexact=True)
+
+ if dtype is None:
+ dtype = dt
+ integer_dtype = False
+ else:
+ integer_dtype = _nx.issubdtype(dtype, _nx.integer)
+
+ # Use `dtype=type(dt)` to enforce a floating point evaluation:
+ delta = np.subtract(stop, start, dtype=type(dt))
+ y = _nx.arange(
+ 0, num, dtype=dt, device=device
+ ).reshape((-1,) + (1,) * ndim(delta))
+
+ # In-place multiplication y *= delta/div is faster, but prevents
+ # the multiplicand from overriding what class is produced, and thus
+ # prevents, e.g. use of Quantities, see gh-7142. Hence, we multiply
+ # in place only for standard scalar types.
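+ # (Illustrative: with a unit-carrying ndarray subclass such as an
+ # astropy-style Quantity `delta`, `y = y * delta` lets the subclass
+ # decide the result class via `__rmul__`/`__array_wrap__`, whereas
+ # `y *= delta` would keep `y` a plain ndarray and drop the unit.)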
+ if div > 0:
+ _mult_inplace = _nx.isscalar(delta)
+ step = delta / div
+ any_step_zero = (
+ step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any())
+ if any_step_zero:
+ # Special handling for denormal numbers, gh-5437
+ y /= div
+ if _mult_inplace:
+ y *= delta
+ else:
+ y = y * delta
+ elif _mult_inplace:
+ y *= step
+ else:
+ y = y * step
+ else:
+ # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
+ # have an undefined step
+ step = nan
+ # Multiply with delta to allow possible override of output class.
+ y = y * delta
+
+ y += start
+
+ if endpoint and num > 1:
+ y[-1, ...] = stop
+
+ if axis != 0:
+ y = _nx.moveaxis(y, 0, axis)
+
+ if integer_dtype:
+ _nx.floor(y, out=y)
+
+ y = conv.wrap(y.astype(dtype, copy=False))
+ if retstep:
+ return y, step
+ else:
+ return y
+
+
+def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
+ dtype=None, axis=None):
+ return (start, stop, base)
+
+
+@array_function_dispatch(_logspace_dispatcher)
+def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
+ axis=0):
+ """
+ Return numbers spaced evenly on a log scale.
+
+ In linear space, the sequence starts at ``base ** start``
+ (`base` to the power of `start`) and ends with ``base ** stop``
+ (see `endpoint` below).
+
+ .. versionchanged:: 1.25.0
+ Non-scalar `base` is now supported
+
+ Parameters
+ ----------
+ start : array_like
+ ``base ** start`` is the starting value of the sequence.
+ stop : array_like
+ ``base ** stop`` is the final value of the sequence, unless `endpoint`
+ is False. In that case, ``num + 1`` values are spaced over the
+ interval in log-space, of which all but the last (a sequence of
+ length `num`) are returned.
+ num : integer, optional
+ Number of samples to generate. Default is 50.
+ endpoint : boolean, optional
+ If true, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ base : array_like, optional
+ The base of the log space. The step size between the elements in
+ ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
+ Default is 10.0.
+ dtype : dtype
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred type will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start,
+ stop, or base are array-like. By default (0), the samples will be
+ along a new axis inserted at the beginning. Use -1 to get an axis at
+ the end.
+
+ Returns
+ -------
+ samples : ndarray
+ `num` samples, equally spaced on a log scale.
+
+ See Also
+ --------
+ arange : Similar to linspace, with the step size specified instead of the
+ number of samples. Note that, when used with a float endpoint, the
+ endpoint may or may not be included.
+ linspace : Similar to logspace, but with the samples uniformly distributed
+ in linear space, instead of log space.
+ geomspace : Similar to logspace, but with endpoints specified directly.
+ :ref:`how-to-partition`
+
+ Notes
+ -----
+ If base is a scalar, logspace is equivalent to the code
+
+ >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
+ ... # doctest: +SKIP
+ >>> power(base, y).astype(dtype)
+ ... # doctest: +SKIP
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.logspace(2.0, 3.0, num=4)
+ array([ 100. , 215.443469 , 464.15888336, 1000. ])
+ >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
+ array([100. , 177.827941 , 316.22776602, 562.34132519])
+ >>> np.logspace(2.0, 3.0, num=4, base=2.0)
+ array([4. , 5.0396842 , 6.34960421, 8. ])
+ >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1)
+ array([[ 4. , 5.0396842 , 6.34960421, 8. ],
+ [ 9. , 12.98024613, 18.72075441, 27. ]])
+
+ Graphical illustration:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 10
+ >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
+ >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
+ >>> y = np.zeros(N)
+ >>> plt.plot(x1, y, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.plot(x2, y + 0.5, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.ylim([-0.5, 1])
+ (-0.5, 1)
+ >>> plt.show()
+
+ """
+ if not isinstance(base, (float, int)) and np.ndim(base):
+ # If base is non-scalar, broadcast it with the others, since it
+ # may influence how axis is interpreted.
+ ndmax = np.broadcast(start, stop, base).ndim
+ start, stop, base = (
+ np.array(a, copy=None, subok=True, ndmin=ndmax)
+ for a in (start, stop, base)
+ )
+ base = np.expand_dims(base, axis=axis)
+ y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
+ if dtype is None:
+ return _nx.power(base, y)
+ return _nx.power(base, y).astype(dtype, copy=False)
+
+
+def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
+ axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_geomspace_dispatcher)
+def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
+ """
+ Return numbers spaced evenly on a log scale (a geometric progression).
+
+ This is similar to `logspace`, but with endpoints specified directly.
+ Each output sample is a constant multiple of the previous.
+
+ Parameters
+ ----------
+ start : array_like
+ The starting value of the sequence.
+ stop : array_like
+ The final value of the sequence, unless `endpoint` is False.
+ In that case, ``num + 1`` values are spaced over the
+ interval in log-space, of which all but the last (a sequence of
+ length `num`) are returned.
+ num : integer, optional
+ Number of samples to generate. Default is 50.
+ endpoint : boolean, optional
+ If true, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ dtype : dtype
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred dtype will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ Returns
+ -------
+ samples : ndarray
+ `num` samples, equally spaced on a log scale.
+
+ See Also
+ --------
+ logspace : Similar to geomspace, but with endpoints specified using log
+ and base.
+ linspace : Similar to geomspace, but with arithmetic instead of geometric
+ progression.
+ arange : Similar to linspace, with the step size specified instead of the
+ number of samples.
+ :ref:`how-to-partition`
+
+ Notes
+ -----
+ If the inputs or dtype are complex, the output will follow a logarithmic
+ spiral in the complex plane. (There are an infinite number of spirals
+ passing through two points; the output will follow the shortest such path.)
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.geomspace(1, 1000, num=4)
+ array([ 1., 10., 100., 1000.])
+ >>> np.geomspace(1, 1000, num=3, endpoint=False)
+ array([ 1., 10., 100.])
+ >>> np.geomspace(1, 1000, num=4, endpoint=False)
+ array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])
+ >>> np.geomspace(1, 256, num=9)
+ array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])
+
+ Note that the above may not produce exact integers:
+
+ >>> np.geomspace(1, 256, num=9, dtype=int)
+ array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])
+ >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
+ array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])
+
+ Negative, decreasing, and complex inputs are allowed:
+
+ >>> np.geomspace(1000, 1, num=4)
+ array([1000., 100., 10., 1.])
+ >>> np.geomspace(-1000, -1, num=4)
+ array([-1000., -100., -10., -1.])
+ >>> np.geomspace(1j, 1000j, num=4) # Straight line
+ array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
+ >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
+ array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
+ 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j,
+ 1.00000000e+00+0.00000000e+00j])
+
+ Graphical illustration of `endpoint` parameter:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 10
+ >>> y = np.zeros(N)
+ >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.axis([0.5, 2000, 0, 3])
+ [0.5, 2000, 0, 3]
+ >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
+ >>> plt.show()
+
+ """
+ start = asanyarray(start)
+ stop = asanyarray(stop)
+ if _nx.any(start == 0) or _nx.any(stop == 0):
+ raise ValueError('Geometric sequence cannot include zero')
+
+ dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
+ if dtype is None:
+ dtype = dt
+ else:
+ # complex to dtype('complex128'), for instance
+ dtype = _nx.dtype(dtype)
+
+ # Promote both arguments to the same dtype in case, for instance, one is
+ # complex and another is negative and log would produce NaN otherwise.
+ # Copy since we may change things in-place further down.
+ start = start.astype(dt, copy=True)
+ stop = stop.astype(dt, copy=True)
+
+ # Allow negative real values and ensure a consistent result for complex
+ # (including avoiding negligible real or imaginary parts in output) by
+ # rotating start to positive real, calculating, then undoing rotation.
+ out_sign = _nx.sign(start)
+ start /= out_sign
+ stop = stop / out_sign
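+ # (Illustrative: for start=-1000, stop=-1, `out_sign` is -1, so the
+ # positive sequence geomspace(1000, 1) is computed in log-space and the
+ # sign is restored by `result *= out_sign` below.)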
+
+ log_start = _nx.log10(start)
+ log_stop = _nx.log10(stop)
+ result = logspace(log_start, log_stop, num=num,
+ endpoint=endpoint, base=10.0, dtype=dt)
+
+ # Make sure the endpoints match the start and stop arguments. This is
+ # necessary because np.exp(np.log(x)) is not necessarily equal to x.
+ if num > 0:
+ result[0] = start
+ if num > 1 and endpoint:
+ result[-1] = stop
+
+ result *= out_sign
+
+ if axis != 0:
+ result = _nx.moveaxis(result, 0, axis)
+
+ return result.astype(dtype, copy=False)
+
+
+def _needs_add_docstring(obj):
+ """
+ Returns true if the only way to set the docstring of `obj` from python is
+ via add_docstring.
+
+ This function errs on the side of being overly conservative.
+ """
+ Py_TPFLAGS_HEAPTYPE = 1 << 9
+
+ if isinstance(obj, (types.FunctionType, types.MethodType, property)):
+ return False
+
+ if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
+ return False
+
+ return True
+
+
+def _add_docstring(obj, doc, warn_on_python):
+ if warn_on_python and not _needs_add_docstring(obj):
+ warnings.warn(
+ f"add_newdoc was used on a pure-python object {obj}. "
+ "Prefer to attach it directly to the source.",
+ UserWarning,
+ stacklevel=3)
+ try:
+ add_docstring(obj, doc)
+ except Exception:
+ pass
+
+
+def add_newdoc(place, obj, doc, warn_on_python=True):
+ """
+ Add documentation to an existing object, typically one defined in C
+
+ The purpose is to allow easier editing of the docstrings without requiring
+ a re-compile. This exists primarily for internal use within numpy itself.
+
+ Parameters
+ ----------
+ place : str
+ The absolute name of the module to import from
+ obj : str or None
+ The name of the object to add documentation to, typically a class or
+ function name.
+ doc : {str, Tuple[str, str], List[Tuple[str, str]]}
+ If a string, the documentation to apply to `obj`
+
+ If a tuple, then the first element is interpreted as an attribute
+ of `obj` and the second as the docstring to apply -
+ ``(method, docstring)``
+
+ If a list, then each element of the list should be a tuple of length
+ two - ``[(method1, docstring1), (method2, docstring2), ...]``
+ warn_on_python : bool
+ If True, the default, emit `UserWarning` if this is used to attach
+ documentation to a pure-python object.
+
+ Notes
+ -----
+ This routine never raises an error if the docstring can't be written, but
+ will raise an error if the object being documented does not exist.
+
+ This routine cannot modify read-only docstrings, such as those of
+ new-style classes or built-in functions. Because this routine never
+ raises an error, the caller must check manually that the docstrings
+ were changed.
+
+ Since this function grabs the ``char *`` from a c-level str object and puts
+ it into the ``tp_doc`` slot of the type of `obj`, it violates a number of
+ C-API best-practices, by:
+
+ - modifying a `PyTypeObject` after calling `PyType_Ready`
+ - calling `Py_INCREF` on the str and losing the reference, so the str
+ will never be released
+
+ If possible it should be avoided.
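+
+ Examples
+ --------
+ An illustrative call; the module and object names are hypothetical:
+
+ >>> add_newdoc('some.module', 'some_c_object',  # doctest: +SKIP
+ ... ('some_method', 'Docstring for some_method.'))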
+ """
+ new = getattr(__import__(place, globals(), {}, [obj]), obj)
+ if isinstance(doc, str):
+ if "${ARRAY_FUNCTION_LIKE}" in doc:
+ doc = overrides.get_array_function_like_doc(new, doc)
+ _add_docstring(new, doc.strip(), warn_on_python)
+ elif isinstance(doc, tuple):
+ attr, docstring = doc
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
+ elif isinstance(doc, list):
+ for attr, docstring in doc:
+ _add_docstring(
+ getattr(new, attr), docstring.strip(), warn_on_python
+ )
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/function_base.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/function_base.pyi
new file mode 100644
index 00000000..44d1311f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/function_base.pyi
@@ -0,0 +1,278 @@
+from typing import Literal as L
+from typing import SupportsIndex, TypeAlias, TypeVar, overload
+
+from _typeshed import Incomplete
+
+import numpy as np
+from numpy._typing import (
+ DTypeLike,
+ NDArray,
+ _ArrayLikeComplex_co,
+ _ArrayLikeFloat_co,
+ _DTypeLike,
+)
+from numpy._typing._array_like import _DualArrayLike
+
+__all__ = ["geomspace", "linspace", "logspace"]
+
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+
+_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float]
+
+@overload
+def linspace(
+ start: _ToArrayFloat64,
+ stop: _ToArrayFloat64,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ retstep: L[False] = False,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+ *,
+ device: L["cpu"] | None = None,
+) -> NDArray[np.float64]: ...
+@overload
+def linspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ retstep: L[False] = False,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+ *,
+ device: L["cpu"] | None = None,
+) -> NDArray[np.floating]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ retstep: L[False] = False,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+ *,
+ device: L["cpu"] | None = None,
+) -> NDArray[np.complexfloating]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex,
+ endpoint: bool,
+ retstep: L[False],
+ dtype: _DTypeLike[_ScalarT],
+ axis: SupportsIndex = 0,
+ *,
+ device: L["cpu"] | None = None,
+) -> NDArray[_ScalarT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ retstep: L[False] = False,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ axis: SupportsIndex = 0,
+ device: L["cpu"] | None = None,
+) -> NDArray[_ScalarT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ retstep: L[False] = False,
+ dtype: DTypeLike | None = None,
+ axis: SupportsIndex = 0,
+ *,
+ device: L["cpu"] | None = None,
+) -> NDArray[Incomplete]: ...
+@overload
+def linspace(
+ start: _ToArrayFloat64,
+ stop: _ToArrayFloat64,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ *,
+ retstep: L[True],
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+ device: L["cpu"] | None = None,
+) -> tuple[NDArray[np.float64], np.float64]: ...
+@overload
+def linspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ *,
+ retstep: L[True],
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+ device: L["cpu"] | None = None,
+) -> tuple[NDArray[np.floating], np.floating]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ *,
+ retstep: L[True],
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+ device: L["cpu"] | None = None,
+) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ *,
+ retstep: L[True],
+ dtype: _DTypeLike[_ScalarT],
+ axis: SupportsIndex = 0,
+ device: L["cpu"] | None = None,
+) -> tuple[NDArray[_ScalarT], _ScalarT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ *,
+ retstep: L[True],
+ dtype: DTypeLike | None = None,
+ axis: SupportsIndex = 0,
+ device: L["cpu"] | None = None,
+) -> tuple[NDArray[Incomplete], Incomplete]: ...
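+
+# Note: a bare `linspace(0.0, 1.0)` call matches the first, float64-returning
+# overload above, while passing `retstep=True` selects one of the
+# tuple-returning overloads.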
+
+@overload
+def logspace(
+ start: _ToArrayFloat64,
+ stop: _ToArrayFloat64,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ base: _ToArrayFloat64 = 10.0,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+) -> NDArray[np.float64]: ...
+@overload
+def logspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ base: _ArrayLikeFloat_co = 10.0,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+) -> NDArray[np.floating]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ base: _ArrayLikeComplex_co = 10.0,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+) -> NDArray[np.complexfloating]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex,
+ endpoint: bool,
+ base: _ArrayLikeComplex_co,
+ dtype: _DTypeLike[_ScalarT],
+ axis: SupportsIndex = 0,
+) -> NDArray[_ScalarT]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ base: _ArrayLikeComplex_co = 10.0,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ axis: SupportsIndex = 0,
+) -> NDArray[_ScalarT]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ base: _ArrayLikeComplex_co = 10.0,
+ dtype: DTypeLike | None = None,
+ axis: SupportsIndex = 0,
+) -> NDArray[Incomplete]: ...
+
+@overload
+def geomspace(
+ start: _ToArrayFloat64,
+ stop: _ToArrayFloat64,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+) -> NDArray[np.float64]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+) -> NDArray[np.floating]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ dtype: None = None,
+ axis: SupportsIndex = 0,
+) -> NDArray[np.complexfloating]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex,
+ endpoint: bool,
+ dtype: _DTypeLike[_ScalarT],
+ axis: SupportsIndex = 0,
+) -> NDArray[_ScalarT]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ axis: SupportsIndex = 0,
+) -> NDArray[_ScalarT]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = 50,
+ endpoint: bool = True,
+ dtype: DTypeLike | None = None,
+ axis: SupportsIndex = 0,
+) -> NDArray[Incomplete]: ...
+
+def add_newdoc(
+ place: str,
+ obj: str,
+ doc: str | tuple[str, str] | list[tuple[str, str]],
+ warn_on_python: bool = True,
+) -> None: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.py b/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.py
new file mode 100644
index 00000000..afa2cceb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.py
@@ -0,0 +1,748 @@
+"""Machine limits for Float32 and Float64 and (long double) if available...
+
+"""
+__all__ = ['finfo', 'iinfo']
+
+import types
+import warnings
+
+from numpy._utils import set_module
+
+from . import numeric
+from . import numerictypes as ntypes
+from ._machar import MachAr
+from .numeric import array, inf, nan
+from .umath import exp2, isnan, log10, nextafter
+
+
+def _fr0(a):
+ """fix rank-0 --> rank-1"""
+ if a.ndim == 0:
+ a = a.copy()
+ a.shape = (1,)
+ return a
+
+
+def _fr1(a):
+ """fix rank > 0 --> rank-0"""
+ if a.size == 1:
+ a = a.copy()
+ a.shape = ()
+ return a
+
+
+class MachArLike:
+ """ Object to simulate MachAr instance """
+ def __init__(self, ftype, *, eps, epsneg, huge, tiny,
+ ibeta, smallest_subnormal=None, **kwargs):
+ self.params = _MACHAR_PARAMS[ftype]
+ self.ftype = ftype
+ self.title = self.params['title']
+ # Parameter types same as for discovered MachAr object.
+ if not smallest_subnormal:
+ self._smallest_subnormal = nextafter(
+ self.ftype(0), self.ftype(1), dtype=self.ftype)
+ else:
+ self._smallest_subnormal = smallest_subnormal
+ self.epsilon = self.eps = self._float_to_float(eps)
+ self.epsneg = self._float_to_float(epsneg)
+ self.xmax = self.huge = self._float_to_float(huge)
+ self.xmin = self._float_to_float(tiny)
+ self.smallest_normal = self.tiny = self._float_to_float(tiny)
+ self.ibeta = self.params['itype'](ibeta)
+ self.__dict__.update(kwargs)
+ self.precision = int(-log10(self.eps))
+ self.resolution = self._float_to_float(
+ self._float_conv(10) ** (-self.precision))
+ self._str_eps = self._float_to_str(self.eps)
+ self._str_epsneg = self._float_to_str(self.epsneg)
+ self._str_xmin = self._float_to_str(self.xmin)
+ self._str_xmax = self._float_to_str(self.xmax)
+ self._str_resolution = self._float_to_str(self.resolution)
+ self._str_smallest_normal = self._float_to_str(self.xmin)
+
+ @property
+ def smallest_subnormal(self):
+ """Return the value for the smallest subnormal.
+
+ Returns
+ -------
+ smallest_subnormal : float
+ value for the smallest subnormal.
+
+ Warns
+ -----
+ UserWarning
+ If the calculated value for the smallest subnormal is zero.
+ """
+ # Check whether the calculated value is zero; if it is, issue a
+ # warning.
+ value = self._smallest_subnormal
+ if self.ftype(0) == value:
+ warnings.warn(
+ f'The value of the smallest subnormal for {self.ftype} type is zero.',
+ UserWarning, stacklevel=2)
+
+ return self._float_to_float(value)
+
+ @property
+ def _str_smallest_subnormal(self):
+ """Return the string representation of the smallest subnormal."""
+ return self._float_to_str(self.smallest_subnormal)
+
+ def _float_to_float(self, value):
+ """Converts float to float.
+
+ Parameters
+ ----------
+ value : float
+ value to be converted.
+ """
+ return _fr1(self._float_conv(value))
+
+ def _float_conv(self, value):
+ """Converts float to conv.
+
+ Parameters
+ ----------
+ value : float
+ value to be converted.
+ """
+ return array([value], self.ftype)
+
+ def _float_to_str(self, value):
+ """Converts float to str.
+
+ Parameters
+ ----------
+ value : float
+ value to be converted.
+ """
+ return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
+
+
+_convert_to_float = {
+ ntypes.csingle: ntypes.single,
+ ntypes.complex128: ntypes.float64,
+ ntypes.clongdouble: ntypes.longdouble
+ }
+
+# Parameters for creating MachAr / MachAr-like objects
+_title_fmt = 'numpy {} precision floating point number'
+_MACHAR_PARAMS = {
+ ntypes.double: {
+ 'itype': ntypes.int64,
+ 'fmt': '%24.16e',
+ 'title': _title_fmt.format('double')},
+ ntypes.single: {
+ 'itype': ntypes.int32,
+ 'fmt': '%15.7e',
+ 'title': _title_fmt.format('single')},
+ ntypes.longdouble: {
+ 'itype': ntypes.longlong,
+ 'fmt': '%s',
+ 'title': _title_fmt.format('long double')},
+ ntypes.half: {
+ 'itype': ntypes.int16,
+ 'fmt': '%12.5e',
+ 'title': _title_fmt.format('half')}}
+
+# Key to identify the floating point type. Key is result of
+#
+# ftype = np.longdouble # or float64, float32, etc.
+# v = (ftype(-1.0) / ftype(10.0))
+# v.view(v.dtype.newbyteorder('<')).tobytes()
+#
+# Uses division to work around deficiencies in strtold on some platforms.
+# See:
+# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
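+#
+# For example, for IEEE-754 float64 the key works out to the little-endian
+# bytes of -0.1, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf', which is registered
+# for float64_ma below.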
+
+_KNOWN_TYPES = {}
+def _register_type(machar, bytepat):
+ _KNOWN_TYPES[bytepat] = machar
+
+
+_float_ma = {}
+
+
+def _register_known_types():
+ # Known parameters for float16
+ # See docstring of MachAr class for description of parameters.
+ f16 = ntypes.float16
+ float16_ma = MachArLike(f16,
+ machep=-10,
+ negep=-11,
+ minexp=-14,
+ maxexp=16,
+ it=10,
+ iexp=5,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f16(-10)),
+ epsneg=exp2(f16(-11)),
+ huge=f16(65504),
+ tiny=f16(2 ** -14))
+ _register_type(float16_ma, b'f\xae')
+ _float_ma[16] = float16_ma
+
+ # Known parameters for float32
+ f32 = ntypes.float32
+ float32_ma = MachArLike(f32,
+ machep=-23,
+ negep=-24,
+ minexp=-126,
+ maxexp=128,
+ it=23,
+ iexp=8,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f32(-23)),
+ epsneg=exp2(f32(-24)),
+ huge=f32((1 - 2 ** -24) * 2**128),
+ tiny=exp2(f32(-126)))
+ _register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
+ _float_ma[32] = float32_ma
+
+ # Known parameters for float64
+ f64 = ntypes.float64
+ epsneg_f64 = 2.0 ** -53.0
+ tiny_f64 = 2.0 ** -1022.0
+ float64_ma = MachArLike(f64,
+ machep=-52,
+ negep=-53,
+ minexp=-1022,
+ maxexp=1024,
+ it=52,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=2.0 ** -52.0,
+ epsneg=epsneg_f64,
+ huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
+ tiny=tiny_f64)
+ _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+ _float_ma[64] = float64_ma
+
+ # Known parameters for IEEE 754 128-bit binary float
+ ld = ntypes.longdouble
+ epsneg_f128 = exp2(ld(-113))
+ tiny_f128 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f128
+ with numeric.errstate(all='ignore'):
+ huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
+ float128_ma = MachArLike(ld,
+ machep=-112,
+ negep=-113,
+ minexp=-16382,
+ maxexp=16384,
+ it=112,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-112)),
+ epsneg=epsneg_f128,
+ huge=huge_f128,
+ tiny=tiny_f128)
+ # IEEE 754 128-bit binary float
+ _register_type(float128_ma,
+ b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
+ _float_ma[128] = float128_ma
+
+ # Known parameters for float80 (Intel 80-bit extended precision)
+ epsneg_f80 = exp2(ld(-64))
+ tiny_f80 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f80
+ with numeric.errstate(all='ignore'):
+ huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
+ float80_ma = MachArLike(ld,
+ machep=-63,
+ negep=-64,
+ minexp=-16382,
+ maxexp=16384,
+ it=63,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-63)),
+ epsneg=epsneg_f80,
+ huge=huge_f80,
+ tiny=tiny_f80)
+ # float80, first 10 bytes containing actual storage
+ _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
+ _float_ma[80] = float80_ma
+
+ # Guessed / known parameters for double double; see:
+ # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
+ # These numbers have the same exponent range as float64, but extended
+ # number of digits in the significand.
+ huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
+ # As the smallest_normal in double double is so hard to calculate we set
+ # it to NaN.
+ smallest_normal_dd = nan
+ # Leave the same value for the smallest subnormal as double
+ smallest_subnormal_dd = ld(nextafter(0., 1.))
+ float_dd_ma = MachArLike(ld,
+ machep=-105,
+ negep=-106,
+ minexp=-1022,
+ maxexp=1024,
+ it=105,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-105)),
+ epsneg=exp2(ld(-106)),
+ huge=huge_dd,
+ tiny=smallest_normal_dd,
+ smallest_subnormal=smallest_subnormal_dd)
+ # double double; low, high order (e.g. PPC 64)
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+ # double double; high, low order (e.g. PPC 64 le)
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
+ _float_ma['dd'] = float_dd_ma
+
+
+def _get_machar(ftype):
+ """ Get MachAr instance or MachAr-like instance
+
+ Get parameters for floating point type, by first trying signatures of
+ various known floating point types, then, if none match, attempting to
+ identify parameters by analysis.
+
+ Parameters
+ ----------
+ ftype : class
+ Numpy floating point type class (e.g. ``np.float64``)
+
+ Returns
+ -------
+ ma_like : instance of :class:`MachAr` or :class:`MachArLike`
+ Object giving floating point parameters for `ftype`.
+
+ Warns
+ -----
+ UserWarning
+ If the binary signature of the float type is not in the dictionary of
+ known float types.
+ """
+ params = _MACHAR_PARAMS.get(ftype)
+ if params is None:
+ raise ValueError(repr(ftype))
+ # Detect known / suspected types
+ # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold
+ # may be deficient
+ key = (ftype(-1.0) / ftype(10.))
+ key = key.view(key.dtype.newbyteorder("<")).tobytes()
+ ma_like = None
+ if ftype == ntypes.longdouble:
+ # Could be 80 bit == 10 byte extended precision, where last bytes can
+ # be random garbage.
+ # Comparing first 10 bytes to pattern first to avoid branching on the
+ # random garbage.
+ ma_like = _KNOWN_TYPES.get(key[:10])
+ if ma_like is None:
+ # see if the full key is known.
+ ma_like = _KNOWN_TYPES.get(key)
+ if ma_like is None and len(key) == 16:
+ # machine limits could be f80 masquerading as np.float128:
+ # collect all 16-byte keys into a new dict keyed on the first 10
+ # bytes only, since the trailing bytes can be random garbage
+ _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}
+ ma_like = _kt.get(key[:10])
+ if ma_like is not None:
+ return ma_like
+ # Fall back to parameter discovery
+ warnings.warn(
+ f'Signature {key} for {ftype} does not match any known type: '
+ 'falling back to type probe function.\n'
+ 'This warning indicates broken support for the dtype!',
+ UserWarning, stacklevel=2)
+ return _discovered_machar(ftype)
+
+
+def _discovered_machar(ftype):
+ """ Create MachAr instance with found information on float types
+
+ TODO: Ideally, MachAr should be retired completely. We currently only
+ ever use it on systems with a broken longdouble (valgrind, WSL).
+ """
+ params = _MACHAR_PARAMS[ftype]
+ return MachAr(lambda v: array([v], ftype),
+ lambda v: _fr0(v.astype(params['itype']))[0],
+ lambda v: array(_fr0(v)[0], ftype),
+ lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
+ params['title'])
+
+
+@set_module('numpy')
+class finfo:
+ """
+ finfo(dtype)
+
+ Machine limits for floating point types.
+
+ Attributes
+ ----------
+ bits : int
+ The number of bits occupied by the type.
+ dtype : dtype
+ Returns the dtype for which `finfo` returns information. For complex
+ input, the returned dtype is the associated ``float*`` dtype for its
+ real and complex components.
+ eps : float
+ The difference between 1.0 and the next smallest representable float
+ larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
+ standard, ``eps = 2**-52``, approximately 2.22e-16.
+ epsneg : float
+ The difference between 1.0 and the next smallest representable float
+ less than 1.0. For example, for 64-bit binary floats in the IEEE-754
+ standard, ``epsneg = 2**-53``, approximately 1.11e-16.
+ iexp : int
+ The number of bits in the exponent portion of the floating point
+ representation.
+ machep : int
+ The exponent that yields `eps`.
+ max : floating point number of the appropriate type
+ The largest representable number.
+ maxexp : int
+ The smallest positive power of the base (2) that causes overflow.
+ min : floating point number of the appropriate type
+ The smallest representable number, typically ``-max``.
+ minexp : int
+ The most negative power of the base (2) consistent with there
+ being no leading 0's in the mantissa.
+ negep : int
+ The exponent that yields `epsneg`.
+ nexp : int
+ The number of bits in the exponent including its sign and bias.
+ nmant : int
+ The number of bits in the mantissa.
+ precision : int
+ The approximate number of decimal digits to which this kind of
+ float is precise.
+ resolution : floating point number of the appropriate type
+ The approximate decimal resolution of this type, i.e.,
+ ``10**-precision``.
+ tiny : float
+ An alias for `smallest_normal`, kept for backwards compatibility.
+ smallest_normal : float
+ The smallest positive floating point number with 1 as leading bit in
+ the mantissa following IEEE-754 (see Notes).
+ smallest_subnormal : float
+ The smallest positive floating point number with 0 as leading bit in
+ the mantissa following IEEE-754.
+
+ Parameters
+ ----------
+ dtype : float, dtype, or instance
+ Kind of floating point or complex floating point
+ data-type about which to get information.
+
+ See Also
+ --------
+ iinfo : The equivalent for integer data types.
+ spacing : The distance between a value and the nearest adjacent number
+ nextafter : The next floating point value after x1 towards x2
+
+ Notes
+ -----
+ For developers of NumPy: do not instantiate this at the module level.
+ The initial calculation of these parameters is expensive and negatively
+ impacts import times. These objects are cached, so calling ``finfo()``
+ repeatedly inside your functions is not a problem.
+
+ Note that ``smallest_normal`` is not actually the smallest positive
+ representable value in a NumPy floating point type. As in the IEEE-754
+ standard [1]_, NumPy floating point types make use of subnormal numbers to
+ fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
+ may have significantly reduced precision [2]_.
+
+ This function can also be used for complex data types. In that case the
+ output is the same as for the corresponding real float type (e.g.
+ numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)), and
+ the reported limits apply to the real and imaginary components
+ individually.
+
+ References
+ ----------
+ .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
+ pp.1-70, 2008, https://doi.org/10.1109/IEEESTD.2008.4610935
+ .. [2] Wikipedia, "Denormal Numbers",
+ https://en.wikipedia.org/wiki/Denormal_number
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.finfo(np.float64).dtype
+ dtype('float64')
+ >>> np.finfo(np.complex64).dtype
+ dtype('float32')
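+
+ Instances are cached (via the `_finfo_cache` mechanism below), so
+ repeated lookups are cheap and return the same object:
+
+ >>> np.finfo(np.float64) is np.finfo(np.float64)
+ True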
+
+ """
+
+ _finfo_cache = {}
+
+ __class_getitem__ = classmethod(types.GenericAlias)
+
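+ # ``__new__`` is effectively memoized: every spelling of a dtype that
+ # resolves to the same finfo object is recorded in ``_finfo_cache``,
+ # so the expensive parameter probing in ``_init`` runs at most once
+ # per floating point type.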
+ def __new__(cls, dtype):
+ try:
+ obj = cls._finfo_cache.get(dtype) # most common path
+ if obj is not None:
+ return obj
+ except TypeError:
+ pass
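+ # A TypeError from the dict lookup means the argument was
+ # unhashable; fall through to the full dtype normalization below.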
+
+ if dtype is None:
+ # Deprecated in NumPy 1.25, 2023-01-16
+ warnings.warn(
+ "finfo() dtype cannot be None. This behavior will "
+ "raise an error in the future. (Deprecated in NumPy 1.25)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ try:
+ dtype = numeric.dtype(dtype)
+ except TypeError:
+ # In case a float instance was given
+ dtype = numeric.dtype(type(dtype))
+
+ obj = cls._finfo_cache.get(dtype)
+ if obj is not None:
+ return obj
+ dtypes = [dtype]
+ newdtype = ntypes.obj2sctype(dtype)
+ if newdtype is not dtype:
+ dtypes.append(newdtype)
+ dtype = newdtype
+ if not issubclass(dtype, numeric.inexact):
+ raise ValueError(f"data type {dtype!r} not inexact")
+ obj = cls._finfo_cache.get(dtype)
+ if obj is not None:
+ return obj
+ if not issubclass(dtype, numeric.floating):
+ newdtype = _convert_to_float[dtype]
+ if newdtype is not dtype:
+ # dtype changed, for example from complex128 to float64
+ dtypes.append(newdtype)
+ dtype = newdtype
+
+ obj = cls._finfo_cache.get(dtype, None)
+ if obj is not None:
+ # The original dtype was not in the cache, but the normalized
+ # dtype is. Add the original spellings to the cache and
+ # return the shared object.
+ for dt in dtypes:
+ cls._finfo_cache[dt] = obj
+ return obj
+ obj = object.__new__(cls)._init(dtype)
+ for dt in dtypes:
+ cls._finfo_cache[dt] = obj
+ return obj
+
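+ # ``_init`` does the actual work: it probes the machine parameters via
+ # ``_get_machar(dtype)`` and copies them (plus the preformatted string
+ # forms used by ``__str__``/``__repr__``) onto the instance.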
+ def _init(self, dtype):
+ self.dtype = numeric.dtype(dtype)
+ machar = _get_machar(dtype)
+
+ for word in ['precision', 'iexp',
+ 'maxexp', 'minexp', 'negep',
+ 'machep']:
+ setattr(self, word, getattr(machar, word))
+ for word in ['resolution', 'epsneg', 'smallest_subnormal']:
+ setattr(self, word, getattr(machar, word).flat[0])
+ self.bits = self.dtype.itemsize * 8
+ self.max = machar.huge.flat[0]
+ self.min = -self.max
+ self.eps = machar.eps.flat[0]
+ self.nexp = machar.iexp
+ self.nmant = machar.it
+ self._machar = machar
+ self._str_tiny = machar._str_xmin.strip()
+ self._str_max = machar._str_xmax.strip()
+ self._str_epsneg = machar._str_epsneg.strip()
+ self._str_eps = machar._str_eps.strip()
+ self._str_resolution = machar._str_resolution.strip()
+ self._str_smallest_normal = machar._str_smallest_normal.strip()
+ self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
+ return self
+
+ def __str__(self):
+ fmt = (
+ 'Machine parameters for %(dtype)s\n'
+ '---------------------------------------------------------------\n'
+ 'precision = %(precision)3s resolution = %(_str_resolution)s\n'
+ 'machep = %(machep)6s eps = %(_str_eps)s\n'
+ 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
+ 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
+ 'maxexp = %(maxexp)6s max = %(_str_max)s\n'
+ 'nexp = %(nexp)6s min = -max\n'
+ 'smallest_normal = %(_str_smallest_normal)s '
+ 'smallest_subnormal = %(_str_smallest_subnormal)s\n'
+ '---------------------------------------------------------------\n'
+ )
+ return fmt % self.__dict__
+
+ def __repr__(self):
+ c = self.__class__.__name__
+ d = self.__dict__.copy()
+ d['klass'] = c
+ return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
+ " max=%(_str_max)s, dtype=%(dtype)s)") % d)
+
+ @property
+ def smallest_normal(self):
+ """Return the value for the smallest normal.
+
+ Returns
+ -------
+ smallest_normal : float
+ Value for the smallest normal.
+
+ Warns
+ -----
+ UserWarning
+ If the calculated value for the smallest normal is requested for
+ double-double.
+ """
+ # This check is necessary because the value for smallest_normal is
+ # platform dependent for longdouble types.
+ if isnan(self._machar.smallest_normal.flat[0]):
+ warnings.warn(
+ 'The value of smallest normal is undefined for double double',
+ UserWarning, stacklevel=2)
+ return self._machar.smallest_normal.flat[0]
+
+ @property
+ def tiny(self):
+ """Return the value for tiny, alias of smallest_normal.
+
+ Returns
+ -------
+ tiny : float
+ Value for the smallest normal, an alias of smallest_normal.
+
+ Warns
+ -----
+ UserWarning
+ If the calculated value for the smallest normal is requested for
+ double-double.
+ """
+ return self.smallest_normal
+
+
+@set_module('numpy')
+class iinfo:
+ """
+ iinfo(type)
+
+ Machine limits for integer types.
+
+ Attributes
+ ----------
+ bits : int
+ The number of bits occupied by the type.
+ dtype : dtype
+ Returns the dtype for which `iinfo` returns information.
+ min : int
+ The smallest integer expressible by the type.
+ max : int
+ The largest integer expressible by the type.
+
+ Parameters
+ ----------
+ int_type : integer type, dtype, or instance
+ The kind of integer data type to get information about.
+
+ See Also
+ --------
+ finfo : The equivalent for floating point data types.
+
+ Examples
+ --------
+ With types:
+
+ >>> import numpy as np
+ >>> ii16 = np.iinfo(np.int16)
+ >>> ii16.min
+ -32768
+ >>> ii16.max
+ 32767
+ >>> ii32 = np.iinfo(np.int32)
+ >>> ii32.min
+ -2147483648
+ >>> ii32.max
+ 2147483647
+
+ With instances:
+
+ >>> ii32 = np.iinfo(np.int32(10))
+ >>> ii32.min
+ -2147483648
+ >>> ii32.max
+ 2147483647
+
+ """
+
+ _min_vals = {}
+ _max_vals = {}
+
+ __class_getitem__ = classmethod(types.GenericAlias)
+
+ def __init__(self, int_type):
+ try:
+ self.dtype = numeric.dtype(int_type)
+ except TypeError:
+ self.dtype = numeric.dtype(type(int_type))
+ self.kind = self.dtype.kind
+ self.bits = self.dtype.itemsize * 8
+ self.key = "%s%d" % (self.kind, self.bits)
+ if self.kind not in 'iu':
+ raise ValueError(f"Invalid integer data type {self.kind!r}.")
+
+ @property
+ def min(self):
+ """Minimum value of given dtype."""
+ if self.kind == 'u':
+ return 0
+ else:
+ try:
+ val = iinfo._min_vals[self.key]
+ except KeyError:
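+ # Two's-complement lower bound,
+ # e.g. 16 bits -> -(1 << 15) == -32768.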
+ val = int(-(1 << (self.bits - 1)))
+ iinfo._min_vals[self.key] = val
+ return val
+
+ @property
+ def max(self):
+ """Maximum value of given dtype."""
+ try:
+ val = iinfo._max_vals[self.key]
+ except KeyError:
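+ # (1 << bits) - 1 for unsigned types (e.g. uint8 -> 255) and
+ # (1 << (bits - 1)) - 1 for signed ones (e.g. int8 -> 127).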
+ if self.kind == 'u':
+ val = int((1 << self.bits) - 1)
+ else:
+ val = int((1 << (self.bits - 1)) - 1)
+ iinfo._max_vals[self.key] = val
+ return val
+
+ def __str__(self):
+ """String representation."""
+ fmt = (
+ 'Machine parameters for %(dtype)s\n'
+ '---------------------------------------------------------------\n'
+ 'min = %(min)s\n'
+ 'max = %(max)s\n'
+ '---------------------------------------------------------------\n'
+ )
+ return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
+
+ def __repr__(self):
+ return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
+ self.min, self.max, self.dtype)
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.pyi
new file mode 100644
index 00000000..9d79b178
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/getlimits.pyi
@@ -0,0 +1,3 @@
+from numpy import finfo, iinfo
+
+__all__ = ["finfo", "iinfo"]
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.c b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.c
new file mode 100644
index 00000000..8398c627
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.c
@@ -0,0 +1,376 @@
+
+/* These pointers will be stored in the C-object for use in other
+ extension modules
+*/
+
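+/* NULL entries are placeholders for API slots that have been removed or
+ are reserved; keeping them preserves the index of every later entry,
+ which is what the lookup macros in __multiarray_api.h rely on.
+*/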
+void *PyArray_API[] = {
+ (void *) PyArray_GetNDArrayCVersion,
+ NULL,
+ (void *) &PyArray_Type,
+ (void *) &PyArrayDescr_Type,
+ NULL,
+ (void *) &PyArrayIter_Type,
+ (void *) &PyArrayMultiIter_Type,
+ (int *) &NPY_NUMUSERTYPES,
+ (void *) &PyBoolArrType_Type,
+ (void *) &_PyArrayScalar_BoolValues,
+ (void *) &PyGenericArrType_Type,
+ (void *) &PyNumberArrType_Type,
+ (void *) &PyIntegerArrType_Type,
+ (void *) &PySignedIntegerArrType_Type,
+ (void *) &PyUnsignedIntegerArrType_Type,
+ (void *) &PyInexactArrType_Type,
+ (void *) &PyFloatingArrType_Type,
+ (void *) &PyComplexFloatingArrType_Type,
+ (void *) &PyFlexibleArrType_Type,
+ (void *) &PyCharacterArrType_Type,
+ (void *) &PyByteArrType_Type,
+ (void *) &PyShortArrType_Type,
+ (void *) &PyIntArrType_Type,
+ (void *) &PyLongArrType_Type,
+ (void *) &PyLongLongArrType_Type,
+ (void *) &PyUByteArrType_Type,
+ (void *) &PyUShortArrType_Type,
+ (void *) &PyUIntArrType_Type,
+ (void *) &PyULongArrType_Type,
+ (void *) &PyULongLongArrType_Type,
+ (void *) &PyFloatArrType_Type,
+ (void *) &PyDoubleArrType_Type,
+ (void *) &PyLongDoubleArrType_Type,
+ (void *) &PyCFloatArrType_Type,
+ (void *) &PyCDoubleArrType_Type,
+ (void *) &PyCLongDoubleArrType_Type,
+ (void *) &PyObjectArrType_Type,
+ (void *) &PyStringArrType_Type,
+ (void *) &PyUnicodeArrType_Type,
+ (void *) &PyVoidArrType_Type,
+ NULL,
+ NULL,
+ (void *) PyArray_INCREF,
+ (void *) PyArray_XDECREF,
+ (void *) PyArray_SetStringFunction,
+ (void *) PyArray_DescrFromType,
+ (void *) PyArray_TypeObjectFromType,
+ (void *) PyArray_Zero,
+ (void *) PyArray_One,
+ (void *) PyArray_CastToType,
+ (void *) PyArray_CopyInto,
+ (void *) PyArray_CopyAnyInto,
+ (void *) PyArray_CanCastSafely,
+ (void *) PyArray_CanCastTo,
+ (void *) PyArray_ObjectType,
+ (void *) PyArray_DescrFromObject,
+ (void *) PyArray_ConvertToCommonType,
+ (void *) PyArray_DescrFromScalar,
+ (void *) PyArray_DescrFromTypeObject,
+ (void *) PyArray_Size,
+ (void *) PyArray_Scalar,
+ (void *) PyArray_FromScalar,
+ (void *) PyArray_ScalarAsCtype,
+ (void *) PyArray_CastScalarToCtype,
+ (void *) PyArray_CastScalarDirect,
+ (void *) PyArray_Pack,
+ NULL,
+ NULL,
+ NULL,
+ (void *) PyArray_FromAny,
+ (void *) PyArray_EnsureArray,
+ (void *) PyArray_EnsureAnyArray,
+ (void *) PyArray_FromFile,
+ (void *) PyArray_FromString,
+ (void *) PyArray_FromBuffer,
+ (void *) PyArray_FromIter,
+ (void *) PyArray_Return,
+ (void *) PyArray_GetField,
+ (void *) PyArray_SetField,
+ (void *) PyArray_Byteswap,
+ (void *) PyArray_Resize,
+ NULL,
+ NULL,
+ NULL,
+ (void *) PyArray_CopyObject,
+ (void *) PyArray_NewCopy,
+ (void *) PyArray_ToList,
+ (void *) PyArray_ToString,
+ (void *) PyArray_ToFile,
+ (void *) PyArray_Dump,
+ (void *) PyArray_Dumps,
+ (void *) PyArray_ValidType,
+ (void *) PyArray_UpdateFlags,
+ (void *) PyArray_New,
+ (void *) PyArray_NewFromDescr,
+ (void *) PyArray_DescrNew,
+ (void *) PyArray_DescrNewFromType,
+ (void *) PyArray_GetPriority,
+ (void *) PyArray_IterNew,
+ (void *) PyArray_MultiIterNew,
+ (void *) PyArray_PyIntAsInt,
+ (void *) PyArray_PyIntAsIntp,
+ (void *) PyArray_Broadcast,
+ NULL,
+ (void *) PyArray_FillWithScalar,
+ (void *) PyArray_CheckStrides,
+ (void *) PyArray_DescrNewByteorder,
+ (void *) PyArray_IterAllButAxis,
+ (void *) PyArray_CheckFromAny,
+ (void *) PyArray_FromArray,
+ (void *) PyArray_FromInterface,
+ (void *) PyArray_FromStructInterface,
+ (void *) PyArray_FromArrayAttr,
+ (void *) PyArray_ScalarKind,
+ (void *) PyArray_CanCoerceScalar,
+ NULL,
+ (void *) PyArray_CanCastScalar,
+ NULL,
+ (void *) PyArray_RemoveSmallest,
+ (void *) PyArray_ElementStrides,
+ (void *) PyArray_Item_INCREF,
+ (void *) PyArray_Item_XDECREF,
+ NULL,
+ (void *) PyArray_Transpose,
+ (void *) PyArray_TakeFrom,
+ (void *) PyArray_PutTo,
+ (void *) PyArray_PutMask,
+ (void *) PyArray_Repeat,
+ (void *) PyArray_Choose,
+ (void *) PyArray_Sort,
+ (void *) PyArray_ArgSort,
+ (void *) PyArray_SearchSorted,
+ (void *) PyArray_ArgMax,
+ (void *) PyArray_ArgMin,
+ (void *) PyArray_Reshape,
+ (void *) PyArray_Newshape,
+ (void *) PyArray_Squeeze,
+ (void *) PyArray_View,
+ (void *) PyArray_SwapAxes,
+ (void *) PyArray_Max,
+ (void *) PyArray_Min,
+ (void *) PyArray_Ptp,
+ (void *) PyArray_Mean,
+ (void *) PyArray_Trace,
+ (void *) PyArray_Diagonal,
+ (void *) PyArray_Clip,
+ (void *) PyArray_Conjugate,
+ (void *) PyArray_Nonzero,
+ (void *) PyArray_Std,
+ (void *) PyArray_Sum,
+ (void *) PyArray_CumSum,
+ (void *) PyArray_Prod,
+ (void *) PyArray_CumProd,
+ (void *) PyArray_All,
+ (void *) PyArray_Any,
+ (void *) PyArray_Compress,
+ (void *) PyArray_Flatten,
+ (void *) PyArray_Ravel,
+ (void *) PyArray_MultiplyList,
+ (void *) PyArray_MultiplyIntList,
+ (void *) PyArray_GetPtr,
+ (void *) PyArray_CompareLists,
+ (void *) PyArray_AsCArray,
+ NULL,
+ NULL,
+ (void *) PyArray_Free,
+ (void *) PyArray_Converter,
+ (void *) PyArray_IntpFromSequence,
+ (void *) PyArray_Concatenate,
+ (void *) PyArray_InnerProduct,
+ (void *) PyArray_MatrixProduct,
+ NULL,
+ (void *) PyArray_Correlate,
+ NULL,
+ (void *) PyArray_DescrConverter,
+ (void *) PyArray_DescrConverter2,
+ (void *) PyArray_IntpConverter,
+ (void *) PyArray_BufferConverter,
+ (void *) PyArray_AxisConverter,
+ (void *) PyArray_BoolConverter,
+ (void *) PyArray_ByteorderConverter,
+ (void *) PyArray_OrderConverter,
+ (void *) PyArray_EquivTypes,
+ (void *) PyArray_Zeros,
+ (void *) PyArray_Empty,
+ (void *) PyArray_Where,
+ (void *) PyArray_Arange,
+ (void *) PyArray_ArangeObj,
+ (void *) PyArray_SortkindConverter,
+ (void *) PyArray_LexSort,
+ (void *) PyArray_Round,
+ (void *) PyArray_EquivTypenums,
+ (void *) PyArray_RegisterDataType,
+ (void *) PyArray_RegisterCastFunc,
+ (void *) PyArray_RegisterCanCast,
+ (void *) PyArray_InitArrFuncs,
+ (void *) PyArray_IntTupleFromIntp,
+ NULL,
+ (void *) PyArray_ClipmodeConverter,
+ (void *) PyArray_OutputConverter,
+ (void *) PyArray_BroadcastToShape,
+ NULL,
+ NULL,
+ (void *) PyArray_DescrAlignConverter,
+ (void *) PyArray_DescrAlignConverter2,
+ (void *) PyArray_SearchsideConverter,
+ (void *) PyArray_CheckAxis,
+ (void *) PyArray_OverflowMultiplyList,
+ NULL,
+ (void *) PyArray_MultiIterFromObjects,
+ (void *) PyArray_GetEndianness,
+ (void *) PyArray_GetNDArrayCFeatureVersion,
+ (void *) PyArray_Correlate2,
+ (void *) PyArray_NeighborhoodIterNew,
+ (void *) &PyTimeIntegerArrType_Type,
+ (void *) &PyDatetimeArrType_Type,
+ (void *) &PyTimedeltaArrType_Type,
+ (void *) &PyHalfArrType_Type,
+ (void *) &NpyIter_Type,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ (void *) NpyIter_GetTransferFlags,
+ (void *) NpyIter_New,
+ (void *) NpyIter_MultiNew,
+ (void *) NpyIter_AdvancedNew,
+ (void *) NpyIter_Copy,
+ (void *) NpyIter_Deallocate,
+ (void *) NpyIter_HasDelayedBufAlloc,
+ (void *) NpyIter_HasExternalLoop,
+ (void *) NpyIter_EnableExternalLoop,
+ (void *) NpyIter_GetInnerStrideArray,
+ (void *) NpyIter_GetInnerLoopSizePtr,
+ (void *) NpyIter_Reset,
+ (void *) NpyIter_ResetBasePointers,
+ (void *) NpyIter_ResetToIterIndexRange,
+ (void *) NpyIter_GetNDim,
+ (void *) NpyIter_GetNOp,
+ (void *) NpyIter_GetIterNext,
+ (void *) NpyIter_GetIterSize,
+ (void *) NpyIter_GetIterIndexRange,
+ (void *) NpyIter_GetIterIndex,
+ (void *) NpyIter_GotoIterIndex,
+ (void *) NpyIter_HasMultiIndex,
+ (void *) NpyIter_GetShape,
+ (void *) NpyIter_GetGetMultiIndex,
+ (void *) NpyIter_GotoMultiIndex,
+ (void *) NpyIter_RemoveMultiIndex,
+ (void *) NpyIter_HasIndex,
+ (void *) NpyIter_IsBuffered,
+ (void *) NpyIter_IsGrowInner,
+ (void *) NpyIter_GetBufferSize,
+ (void *) NpyIter_GetIndexPtr,
+ (void *) NpyIter_GotoIndex,
+ (void *) NpyIter_GetDataPtrArray,
+ (void *) NpyIter_GetDescrArray,
+ (void *) NpyIter_GetOperandArray,
+ (void *) NpyIter_GetIterView,
+ (void *) NpyIter_GetReadFlags,
+ (void *) NpyIter_GetWriteFlags,
+ (void *) NpyIter_DebugPrint,
+ (void *) NpyIter_IterationNeedsAPI,
+ (void *) NpyIter_GetInnerFixedStrideArray,
+ (void *) NpyIter_RemoveAxis,
+ (void *) NpyIter_GetAxisStrideArray,
+ (void *) NpyIter_RequiresBuffering,
+ (void *) NpyIter_GetInitialDataPtrArray,
+ (void *) NpyIter_CreateCompatibleStrides,
+ (void *) PyArray_CastingConverter,
+ (void *) PyArray_CountNonzero,
+ (void *) PyArray_PromoteTypes,
+ (void *) PyArray_MinScalarType,
+ (void *) PyArray_ResultType,
+ (void *) PyArray_CanCastArrayTo,
+ (void *) PyArray_CanCastTypeTo,
+ (void *) PyArray_EinsteinSum,
+ (void *) PyArray_NewLikeArray,
+ NULL,
+ (void *) PyArray_ConvertClipmodeSequence,
+ (void *) PyArray_MatrixProduct2,
+ (void *) NpyIter_IsFirstVisit,
+ (void *) PyArray_SetBaseObject,
+ (void *) PyArray_CreateSortedStridePerm,
+ (void *) PyArray_RemoveAxesInPlace,
+ (void *) PyArray_DebugPrint,
+ (void *) PyArray_FailUnlessWriteable,
+ (void *) PyArray_SetUpdateIfCopyBase,
+ (void *) PyDataMem_NEW,
+ (void *) PyDataMem_FREE,
+ (void *) PyDataMem_RENEW,
+ NULL,
+ (NPY_CASTING *) &NPY_DEFAULT_ASSIGN_CASTING,
+ NULL,
+ NULL,
+ NULL,
+ (void *) PyArray_Partition,
+ (void *) PyArray_ArgPartition,
+ (void *) PyArray_SelectkindConverter,
+ (void *) PyDataMem_NEW_ZEROED,
+ (void *) PyArray_CheckAnyScalarExact,
+ NULL,
+ (void *) PyArray_ResolveWritebackIfCopy,
+ (void *) PyArray_SetWritebackIfCopyBase,
+ (void *) PyDataMem_SetHandler,
+ (void *) PyDataMem_GetHandler,
+ (PyObject* *) &PyDataMem_DefaultHandler,
+ (void *) NpyDatetime_ConvertDatetime64ToDatetimeStruct,
+ (void *) NpyDatetime_ConvertDatetimeStructToDatetime64,
+ (void *) NpyDatetime_ConvertPyDateTimeToDatetimeStruct,
+ (void *) NpyDatetime_GetDatetimeISO8601StrLen,
+ (void *) NpyDatetime_MakeISO8601Datetime,
+ (void *) NpyDatetime_ParseISO8601Datetime,
+ (void *) NpyString_load,
+ (void *) NpyString_pack,
+ (void *) NpyString_pack_null,
+ (void *) NpyString_acquire_allocator,
+ (void *) NpyString_acquire_allocators,
+ (void *) NpyString_release_allocator,
+ (void *) NpyString_release_allocators,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ (void *) PyArray_GetDefaultDescr,
+ (void *) PyArrayInitDTypeMeta_FromSpec,
+ (void *) PyArray_CommonDType,
+ (void *) PyArray_PromoteDTypeSequence,
+ (void *) _PyDataType_GetArrFuncs,
+ NULL,
+ NULL,
+ NULL
+};
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.h
new file mode 100644
index 00000000..34363fb2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__multiarray_api.h
@@ -0,0 +1,1622 @@
+
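+/* When building the multiarray module itself, the API symbols below are
+ real declarations; every other consumer instead gets macros (in the
+ #else branch) that index into the PyArray_API pointer table.
+*/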
+#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
+
+typedef struct {
+ PyObject_HEAD
+ npy_bool obval;
+} PyBoolScalarObject;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
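+/* The two elements are the canonical boolean scalar singletons. */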
+
+NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \
+ (void);
+extern NPY_NO_EXPORT PyTypeObject PyArray_Type;
+
+extern NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull;
+#define PyArrayDescr_Type (*(PyTypeObject *)(&PyArrayDescr_TypeFull))
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type;
+
+extern NPY_NO_EXPORT int NPY_NUMUSERTYPES;
+
+extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type;
+
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type;
+
+NPY_NO_EXPORT int PyArray_INCREF \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_XDECREF \
+ (PyArrayObject *);
+NPY_NO_EXPORT void PyArray_SetStringFunction \
+ (PyObject *, int);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \
+ (int);
+NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \
+ (int);
+NPY_NO_EXPORT char * PyArray_Zero \
+ (PyArrayObject *);
+NPY_NO_EXPORT char * PyArray_One \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CastToType \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT int PyArray_CopyInto \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CopyAnyInto \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CanCastSafely \
+ (int, int);
+NPY_NO_EXPORT npy_bool PyArray_CanCastTo \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_ObjectType \
+ (PyObject *, int);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \
+ (PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \
+ (PyObject *, int *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \
+ (PyObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \
+ (PyObject *);
+NPY_NO_EXPORT npy_intp PyArray_Size \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Scalar \
+ (void *, PyArray_Descr *, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \
+ (PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT void PyArray_ScalarAsCtype \
+ (PyObject *, void *);
+NPY_NO_EXPORT int PyArray_CastScalarToCtype \
+ (PyObject *, void *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_CastScalarDirect \
+ (PyObject *, PyArray_Descr *, void *, int);
+NPY_NO_EXPORT int PyArray_Pack \
+ (PyArray_Descr *, void *, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \
+ (PyObject *, PyArray_Descr *, int, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \
+ (PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromFile \
+ (FILE *, PyArray_Descr *, npy_intp, char *);
+NPY_NO_EXPORT PyObject * PyArray_FromString \
+ (char *, npy_intp, PyArray_Descr *, npy_intp, char *);
+NPY_NO_EXPORT PyObject * PyArray_FromBuffer \
+ (PyObject *, PyArray_Descr *, npy_intp, npy_intp);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \
+ (PyObject *, PyArray_Descr *, npy_intp);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_GetField \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetField \
+ (PyArrayObject *, PyArray_Descr *, int, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Byteswap \
+ (PyArrayObject *, npy_bool);
+NPY_NO_EXPORT PyObject * PyArray_Resize \
+ (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order));
+NPY_NO_EXPORT int PyArray_CopyObject \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_NewCopy \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_ToList \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_ToString \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT int PyArray_ToFile \
+ (PyArrayObject *, FILE *, char *, char *);
+NPY_NO_EXPORT int PyArray_Dump \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Dumps \
+ (PyObject *, int);
+NPY_NO_EXPORT int PyArray_ValidType \
+ (int);
+NPY_NO_EXPORT void PyArray_UpdateFlags \
+ (PyArrayObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_New \
+ (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_NewFromDescr \
+ (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \
+ (PyArray_Descr *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \
+ (int);
+NPY_NO_EXPORT double PyArray_GetPriority \
+ (PyObject *, double);
+NPY_NO_EXPORT PyObject * PyArray_IterNew \
+ (PyObject *);
+NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \
+ (int, ...);
+NPY_NO_EXPORT int PyArray_PyIntAsInt \
+ (PyObject *);
+NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \
+ (PyObject *);
+NPY_NO_EXPORT int PyArray_Broadcast \
+ (PyArrayMultiIterObject *);
+NPY_NO_EXPORT int PyArray_FillWithScalar \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT npy_bool PyArray_CheckStrides \
+ (int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \
+ (PyArray_Descr *, char);
+NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \
+ (PyObject *, int *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \
+ (PyObject *, PyArray_Descr *, int, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT PyObject * PyArray_FromInterface \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \
+ (PyObject *, PyArray_Descr *, PyObject *);
+NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \
+ (int, PyArrayObject **);
+NPY_NO_EXPORT int PyArray_CanCoerceScalar \
+ (int, int, NPY_SCALARKIND);
+NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \
+ (PyTypeObject *, PyTypeObject *);
+NPY_NO_EXPORT int PyArray_RemoveSmallest \
+ (PyArrayMultiIterObject *);
+NPY_NO_EXPORT int PyArray_ElementStrides \
+ (PyObject *);
+NPY_NO_EXPORT void PyArray_Item_INCREF \
+ (char *, PyArray_Descr *);
+NPY_NO_EXPORT void PyArray_Item_XDECREF \
+ (char *, PyArray_Descr *);
+NPY_NO_EXPORT PyObject * PyArray_Transpose \
+ (PyArrayObject *, PyArray_Dims *);
+NPY_NO_EXPORT PyObject * PyArray_TakeFrom \
+ (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT PyObject * PyArray_PutTo \
+ (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT PyObject * PyArray_PutMask \
+ (PyArrayObject *, PyObject*, PyObject*);
+NPY_NO_EXPORT PyObject * PyArray_Repeat \
+ (PyArrayObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Choose \
+ (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT int PyArray_Sort \
+ (PyArrayObject *, int, NPY_SORTKIND);
+NPY_NO_EXPORT PyObject * PyArray_ArgSort \
+ (PyArrayObject *, int, NPY_SORTKIND);
+NPY_NO_EXPORT PyObject * PyArray_SearchSorted \
+ (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_ArgMax \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_ArgMin \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Reshape \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Newshape \
+ (PyArrayObject *, PyArray_Dims *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_Squeeze \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \
+ (PyArrayObject *, PyArray_Descr *, PyTypeObject *);
+NPY_NO_EXPORT PyObject * PyArray_SwapAxes \
+ (PyArrayObject *, int, int);
+NPY_NO_EXPORT PyObject * PyArray_Max \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Min \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Ptp \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Mean \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Trace \
+ (PyArrayObject *, int, int, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Diagonal \
+ (PyArrayObject *, int, int, int);
+NPY_NO_EXPORT PyObject * PyArray_Clip \
+ (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Conjugate \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Nonzero \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Std \
+ (PyArrayObject *, int, int, PyArrayObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Sum \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_CumSum \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Prod \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_CumProd \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_All \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Any \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Compress \
+ (PyArrayObject *, PyObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Flatten \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_Ravel \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT npy_intp PyArray_MultiplyList \
+ (npy_intp const *, int);
+NPY_NO_EXPORT int PyArray_MultiplyIntList \
+ (int const *, int);
+NPY_NO_EXPORT void * PyArray_GetPtr \
+ (PyArrayObject *, npy_intp const*);
+NPY_NO_EXPORT int PyArray_CompareLists \
+ (npy_intp const *, npy_intp const *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \
+ (PyObject **, void *, npy_intp *, int, PyArray_Descr*);
+NPY_NO_EXPORT int PyArray_Free \
+ (PyObject *, void *);
+NPY_NO_EXPORT int PyArray_Converter \
+ (PyObject *, PyObject **);
+NPY_NO_EXPORT int PyArray_IntpFromSequence \
+ (PyObject *, npy_intp *, int);
+NPY_NO_EXPORT PyObject * PyArray_Concatenate \
+ (PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_InnerProduct \
+ (PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \
+ (PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Correlate \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT int PyArray_DescrConverter \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_DescrConverter2 \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_IntpConverter \
+ (PyObject *, PyArray_Dims *);
+NPY_NO_EXPORT int PyArray_BufferConverter \
+ (PyObject *, PyArray_Chunk *);
+NPY_NO_EXPORT int PyArray_AxisConverter \
+ (PyObject *, int *);
+NPY_NO_EXPORT int PyArray_BoolConverter \
+ (PyObject *, npy_bool *);
+NPY_NO_EXPORT int PyArray_ByteorderConverter \
+ (PyObject *, char *);
+NPY_NO_EXPORT int PyArray_OrderConverter \
+ (PyObject *, NPY_ORDER *);
+NPY_NO_EXPORT unsigned char PyArray_EquivTypes \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \
+ (int, npy_intp const *, PyArray_Descr *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \
+ (int, npy_intp const *, PyArray_Descr *, int);
+NPY_NO_EXPORT PyObject * PyArray_Where \
+ (PyObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Arange \
+ (double, double, double, int);
+NPY_NO_EXPORT PyObject * PyArray_ArangeObj \
+ (PyObject *, PyObject *, PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_SortkindConverter \
+ (PyObject *, NPY_SORTKIND *);
+NPY_NO_EXPORT PyObject * PyArray_LexSort \
+ (PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Round \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \
+ (int, int);
+NPY_NO_EXPORT int PyArray_RegisterDataType \
+ (PyArray_DescrProto *);
+NPY_NO_EXPORT int PyArray_RegisterCastFunc \
+ (PyArray_Descr *, int, PyArray_VectorUnaryFunc *);
+NPY_NO_EXPORT int PyArray_RegisterCanCast \
+ (PyArray_Descr *, int, NPY_SCALARKIND);
+NPY_NO_EXPORT void PyArray_InitArrFuncs \
+ (PyArray_ArrFuncs *);
+NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \
+ (int, npy_intp const *);
+NPY_NO_EXPORT int PyArray_ClipmodeConverter \
+ (PyObject *, NPY_CLIPMODE *);
+NPY_NO_EXPORT int PyArray_OutputConverter \
+ (PyObject *, PyArrayObject **);
+NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \
+ (PyObject *, npy_intp *, int);
+NPY_NO_EXPORT int PyArray_DescrAlignConverter \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_SearchsideConverter \
+ (PyObject *, void *);
+NPY_NO_EXPORT PyObject * PyArray_CheckAxis \
+ (PyArrayObject *, int *, int);
+NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \
+ (npy_intp const *, int);
+NPY_NO_EXPORT PyObject* PyArray_MultiIterFromObjects \
+ (PyObject **, int, int, ...);
+NPY_NO_EXPORT int PyArray_GetEndianness \
+ (void);
+NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \
+ (void);
+NPY_NO_EXPORT PyObject * PyArray_Correlate2 \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \
+ (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*);
+extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject NpyIter_Type;
+
+NPY_NO_EXPORT NPY_ARRAYMETHOD_FLAGS NpyIter_GetTransferFlags \
+ (NpyIter *);
+NPY_NO_EXPORT NpyIter * NpyIter_New \
+ (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*);
+NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \
+ (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **);
+NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \
+ (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp);
+NPY_NO_EXPORT NpyIter * NpyIter_Copy \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_Deallocate \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_EnableExternalLoop \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_Reset \
+ (NpyIter *, char **);
+NPY_NO_EXPORT int NpyIter_ResetBasePointers \
+ (NpyIter *, char **, char **);
+NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \
+ (NpyIter *, npy_intp, npy_intp, char **);
+NPY_NO_EXPORT int NpyIter_GetNDim \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GetNOp \
+ (NpyIter *);
+NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \
+ (NpyIter *, char **);
+NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \
+ (NpyIter *);
+NPY_NO_EXPORT void NpyIter_GetIterIndexRange \
+ (NpyIter *, npy_intp *, npy_intp *);
+NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GotoIterIndex \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GetShape \
+ (NpyIter *, npy_intp *);
+NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \
+ (NpyIter *, char **);
+NPY_NO_EXPORT int NpyIter_GotoMultiIndex \
+ (NpyIter *, npy_intp const *);
+NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasIndex \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GotoIndex \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT void NpyIter_GetReadFlags \
+ (NpyIter *, char *);
+NPY_NO_EXPORT void NpyIter_GetWriteFlags \
+ (NpyIter *, char *);
+NPY_NO_EXPORT void NpyIter_DebugPrint \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \
+ (NpyIter *);
+NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \
+ (NpyIter *, npy_intp *);
+NPY_NO_EXPORT int NpyIter_RemoveAxis \
+ (NpyIter *, int);
+NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \
+ (NpyIter *, int);
+NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \
+ (NpyIter *);
+NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \
+ (NpyIter *, npy_intp, npy_intp *);
+NPY_NO_EXPORT int PyArray_CastingConverter \
+ (PyObject *, NPY_CASTING *);
+NPY_NO_EXPORT npy_intp PyArray_CountNonzero \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \
+ (npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[]);
+NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \
+ (PyArrayObject *, PyArray_Descr *, NPY_CASTING);
+NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \
+ (PyArray_Descr *, PyArray_Descr *, NPY_CASTING);
+NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \
+ (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_NewLikeArray \
+ (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int);
+NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \
+ (PyObject *, NPY_CLIPMODE *, int);
+NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \
+ (PyObject *, PyObject *, PyArrayObject*);
+NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \
+ (NpyIter *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \
+ (int, npy_intp const *, npy_stride_sort_item *);
+NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \
+ (PyArrayObject *, const npy_bool *);
+NPY_NO_EXPORT void PyArray_DebugPrint \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_FailUnlessWriteable \
+ (PyArrayObject *, const char *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT void * PyDataMem_NEW \
+ (size_t);
+NPY_NO_EXPORT void PyDataMem_FREE \
+ (void *);
+NPY_NO_EXPORT void * PyDataMem_RENEW \
+ (void *, size_t);
+extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING;
+
+NPY_NO_EXPORT int PyArray_Partition \
+ (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND);
+NPY_NO_EXPORT PyObject * PyArray_ArgPartition \
+ (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND);
+NPY_NO_EXPORT int PyArray_SelectkindConverter \
+ (PyObject *, NPY_SELECTKIND *);
+NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \
+ (size_t, size_t);
+NPY_NO_EXPORT int PyArray_CheckAnyScalarExact \
+ (PyObject *);
+NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyDataMem_SetHandler \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyDataMem_GetHandler \
+ (void);
+extern NPY_NO_EXPORT PyObject* PyDataMem_DefaultHandler;
+
+NPY_NO_EXPORT int NpyDatetime_ConvertDatetime64ToDatetimeStruct \
+ (PyArray_DatetimeMetaData *, npy_datetime, npy_datetimestruct *);
+NPY_NO_EXPORT int NpyDatetime_ConvertDatetimeStructToDatetime64 \
+ (PyArray_DatetimeMetaData *, const npy_datetimestruct *, npy_datetime *);
+NPY_NO_EXPORT int NpyDatetime_ConvertPyDateTimeToDatetimeStruct \
+ (PyObject *, npy_datetimestruct *, NPY_DATETIMEUNIT *, int);
+NPY_NO_EXPORT int NpyDatetime_GetDatetimeISO8601StrLen \
+ (int, NPY_DATETIMEUNIT);
+NPY_NO_EXPORT int NpyDatetime_MakeISO8601Datetime \
+ (npy_datetimestruct *, char *, npy_intp, int, int, NPY_DATETIMEUNIT, int, NPY_CASTING);
+NPY_NO_EXPORT int NpyDatetime_ParseISO8601Datetime \
+ (char const *, Py_ssize_t, NPY_DATETIMEUNIT, NPY_CASTING, npy_datetimestruct *, NPY_DATETIMEUNIT *, npy_bool *);
+NPY_NO_EXPORT int NpyString_load \
+ (npy_string_allocator *, const npy_packed_static_string *, npy_static_string *);
+NPY_NO_EXPORT int NpyString_pack \
+ (npy_string_allocator *, npy_packed_static_string *, const char *, size_t);
+NPY_NO_EXPORT int NpyString_pack_null \
+ (npy_string_allocator *, npy_packed_static_string *);
+NPY_NO_EXPORT npy_string_allocator * NpyString_acquire_allocator \
+ (const PyArray_StringDTypeObject *);
+NPY_NO_EXPORT void NpyString_acquire_allocators \
+ (size_t, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]);
+NPY_NO_EXPORT void NpyString_release_allocator \
+ (npy_string_allocator *);
+NPY_NO_EXPORT void NpyString_release_allocators \
+ (size_t, npy_string_allocator *allocators[]);
+NPY_NO_EXPORT PyArray_Descr * PyArray_GetDefaultDescr \
+ (PyArray_DTypeMeta *);
+NPY_NO_EXPORT int PyArrayInitDTypeMeta_FromSpec \
+ (PyArray_DTypeMeta *, PyArrayDTypeMeta_Spec *);
+NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_CommonDType \
+ (PyArray_DTypeMeta *, PyArray_DTypeMeta *);
+NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_PromoteDTypeSequence \
+ (npy_intp, PyArray_DTypeMeta **);
+NPY_NO_EXPORT PyArray_ArrFuncs * _PyDataType_GetArrFuncs \
+ (const PyArray_Descr *);
+
+#else
+
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+ #define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
+ #define _NPY_VERSION_CONCAT_HELPER2(x, y) x ## y
+ #define _NPY_VERSION_CONCAT_HELPER(arg) \
+ _NPY_VERSION_CONCAT_HELPER2(arg, PyArray_RUNTIME_VERSION)
+ #define PyArray_RUNTIME_VERSION \
+ _NPY_VERSION_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL)
+#endif
+
+/* By default do not export API in an .so (was never the case on windows) */
+#ifndef NPY_API_SYMBOL_ATTRIBUTE
+ #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+extern NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API;
+extern NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION;
+#else
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API;
+NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION;
+#else
+static void **PyArray_API = NULL;
+static int PyArray_RUNTIME_VERSION = 0;
+#endif
+#endif
+
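+/* Each macro below casts one slot of PyArray_API to the appropriate
+ function-pointer (or object) type and dereferences it, so call sites
+ read like ordinary C calls, e.g. PyArray_DescrFromType(NPY_DOUBLE).
+*/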
+#define PyArray_GetNDArrayCVersion \
+ (*(unsigned int (*)(void)) \
+ PyArray_API[0])
+#define PyArray_Type (*(PyTypeObject *)PyArray_API[2])
+#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3])
+#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5])
+#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6])
+#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7])
+#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8])
+#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9])
+#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10])
+#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11])
+#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12])
+#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13])
+#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14])
+#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15])
+#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16])
+#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17])
+#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18])
+#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19])
+#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20])
+#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21])
+#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22])
+#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23])
+#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24])
+#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25])
+#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26])
+#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27])
+#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28])
+#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29])
+#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30])
+#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31])
+#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32])
+#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33])
+#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34])
+#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35])
+#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36])
+#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37])
+#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38])
+#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39])
+#define PyArray_INCREF \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[42])
+#define PyArray_XDECREF \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[43])
+#define PyArray_SetStringFunction \
+ (*(void (*)(PyObject *, int)) \
+ PyArray_API[44])
+#define PyArray_DescrFromType \
+ (*(PyArray_Descr * (*)(int)) \
+ PyArray_API[45])
+#define PyArray_TypeObjectFromType \
+ (*(PyObject * (*)(int)) \
+ PyArray_API[46])
+#define PyArray_Zero \
+ (*(char * (*)(PyArrayObject *)) \
+ PyArray_API[47])
+#define PyArray_One \
+ (*(char * (*)(PyArrayObject *)) \
+ PyArray_API[48])
+#define PyArray_CastToType \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[49])
+#define PyArray_CopyInto \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[50])
+#define PyArray_CopyAnyInto \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[51])
+#define PyArray_CanCastSafely \
+ (*(int (*)(int, int)) \
+ PyArray_API[52])
+#define PyArray_CanCastTo \
+ (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[53])
+#define PyArray_ObjectType \
+ (*(int (*)(PyObject *, int)) \
+ PyArray_API[54])
+#define PyArray_DescrFromObject \
+ (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \
+ PyArray_API[55])
+#define PyArray_ConvertToCommonType \
+ (*(PyArrayObject ** (*)(PyObject *, int *)) \
+ PyArray_API[56])
+#define PyArray_DescrFromScalar \
+ (*(PyArray_Descr * (*)(PyObject *)) \
+ PyArray_API[57])
+#define PyArray_DescrFromTypeObject \
+ (*(PyArray_Descr * (*)(PyObject *)) \
+ PyArray_API[58])
+#define PyArray_Size \
+ (*(npy_intp (*)(PyObject *)) \
+ PyArray_API[59])
+#define PyArray_Scalar \
+ (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \
+ PyArray_API[60])
+#define PyArray_FromScalar \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \
+ PyArray_API[61])
+#define PyArray_ScalarAsCtype \
+ (*(void (*)(PyObject *, void *)) \
+ PyArray_API[62])
+#define PyArray_CastScalarToCtype \
+ (*(int (*)(PyObject *, void *, PyArray_Descr *)) \
+ PyArray_API[63])
+#define PyArray_CastScalarDirect \
+ (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \
+ PyArray_API[64])
+
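+/* Slots introduced in newer releases are guarded by NPY_FEATURE_VERSION
+ so that code compiled for an older target API cannot reference them.
+*/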
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyArray_Pack \
+ (*(int (*)(PyArray_Descr *, void *, PyObject *)) \
+ PyArray_API[65])
+#endif
+#define PyArray_FromAny \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \
+ PyArray_API[69])
+#define PyArray_EnsureArray \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[70])
+#define PyArray_EnsureAnyArray \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[71])
+#define PyArray_FromFile \
+ (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \
+ PyArray_API[72])
+#define PyArray_FromString \
+ (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \
+ PyArray_API[73])
+#define PyArray_FromBuffer \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \
+ PyArray_API[74])
+#define PyArray_FromIter \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \
+ PyArray_API[75])
+#define PyArray_Return \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[76])
+#define PyArray_GetField \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[77])
+#define PyArray_SetField \
+ (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \
+ PyArray_API[78])
+#define PyArray_Byteswap \
+ (*(PyObject * (*)(PyArrayObject *, npy_bool)) \
+ PyArray_API[79])
+#define PyArray_Resize \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \
+ PyArray_API[80])
+#define PyArray_CopyObject \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[84])
+#define PyArray_NewCopy \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[85])
+#define PyArray_ToList \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[86])
+#define PyArray_ToString \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[87])
+#define PyArray_ToFile \
+ (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \
+ PyArray_API[88])
+#define PyArray_Dump \
+ (*(int (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[89])
+#define PyArray_Dumps \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[90])
+#define PyArray_ValidType \
+ (*(int (*)(int)) \
+ PyArray_API[91])
+#define PyArray_UpdateFlags \
+ (*(void (*)(PyArrayObject *, int)) \
+ PyArray_API[92])
+#define PyArray_New \
+ (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \
+ PyArray_API[93])
+#define PyArray_NewFromDescr \
+ (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \
+ PyArray_API[94])
+#define PyArray_DescrNew \
+ (*(PyArray_Descr * (*)(PyArray_Descr *)) \
+ PyArray_API[95])
+#define PyArray_DescrNewFromType \
+ (*(PyArray_Descr * (*)(int)) \
+ PyArray_API[96])
+#define PyArray_GetPriority \
+ (*(double (*)(PyObject *, double)) \
+ PyArray_API[97])
+#define PyArray_IterNew \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[98])
+#define PyArray_MultiIterNew \
+ (*(PyObject* (*)(int, ...)) \
+ PyArray_API[99])
+#define PyArray_PyIntAsInt \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[100])
+#define PyArray_PyIntAsIntp \
+ (*(npy_intp (*)(PyObject *)) \
+ PyArray_API[101])
+#define PyArray_Broadcast \
+ (*(int (*)(PyArrayMultiIterObject *)) \
+ PyArray_API[102])
+#define PyArray_FillWithScalar \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[104])
+#define PyArray_CheckStrides \
+ (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *)) \
+ PyArray_API[105])
+#define PyArray_DescrNewByteorder \
+ (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \
+ PyArray_API[106])
+#define PyArray_IterAllButAxis \
+ (*(PyObject * (*)(PyObject *, int *)) \
+ PyArray_API[107])
+#define PyArray_CheckFromAny \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \
+ PyArray_API[108])
+#define PyArray_FromArray \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[109])
+#define PyArray_FromInterface \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[110])
+#define PyArray_FromStructInterface \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[111])
+#define PyArray_FromArrayAttr \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \
+ PyArray_API[112])
+#define PyArray_ScalarKind \
+ (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \
+ PyArray_API[113])
+#define PyArray_CanCoerceScalar \
+ (*(int (*)(int, int, NPY_SCALARKIND)) \
+ PyArray_API[114])
+#define PyArray_CanCastScalar \
+ (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \
+ PyArray_API[116])
+#define PyArray_RemoveSmallest \
+ (*(int (*)(PyArrayMultiIterObject *)) \
+ PyArray_API[118])
+#define PyArray_ElementStrides \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[119])
+#define PyArray_Item_INCREF \
+ (*(void (*)(char *, PyArray_Descr *)) \
+ PyArray_API[120])
+#define PyArray_Item_XDECREF \
+ (*(void (*)(char *, PyArray_Descr *)) \
+ PyArray_API[121])
+#define PyArray_Transpose \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \
+ PyArray_API[123])
+#define PyArray_TakeFrom \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \
+ PyArray_API[124])
+#define PyArray_PutTo \
+ (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \
+ PyArray_API[125])
+#define PyArray_PutMask \
+ (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \
+ PyArray_API[126])
+#define PyArray_Repeat \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \
+ PyArray_API[127])
+#define PyArray_Choose \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \
+ PyArray_API[128])
+#define PyArray_Sort \
+ (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \
+ PyArray_API[129])
+#define PyArray_ArgSort \
+ (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \
+ PyArray_API[130])
+#define PyArray_SearchSorted \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \
+ PyArray_API[131])
+#define PyArray_ArgMax \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[132])
+#define PyArray_ArgMin \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[133])
+#define PyArray_Reshape \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[134])
+#define PyArray_Newshape \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \
+ PyArray_API[135])
+#define PyArray_Squeeze \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[136])
+#define PyArray_View \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \
+ PyArray_API[137])
+#define PyArray_SwapAxes \
+ (*(PyObject * (*)(PyArrayObject *, int, int)) \
+ PyArray_API[138])
+#define PyArray_Max \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[139])
+#define PyArray_Min \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[140])
+#define PyArray_Ptp \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[141])
+#define PyArray_Mean \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[142])
+#define PyArray_Trace \
+ (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \
+ PyArray_API[143])
+#define PyArray_Diagonal \
+ (*(PyObject * (*)(PyArrayObject *, int, int, int)) \
+ PyArray_API[144])
+#define PyArray_Clip \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \
+ PyArray_API[145])
+#define PyArray_Conjugate \
+ (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[146])
+#define PyArray_Nonzero \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[147])
+#define PyArray_Std \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \
+ PyArray_API[148])
+#define PyArray_Sum \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[149])
+#define PyArray_CumSum \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[150])
+#define PyArray_Prod \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[151])
+#define PyArray_CumProd \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[152])
+#define PyArray_All \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[153])
+#define PyArray_Any \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[154])
+#define PyArray_Compress \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \
+ PyArray_API[155])
+#define PyArray_Flatten \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[156])
+#define PyArray_Ravel \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[157])
+#define PyArray_MultiplyList \
+ (*(npy_intp (*)(npy_intp const *, int)) \
+ PyArray_API[158])
+#define PyArray_MultiplyIntList \
+ (*(int (*)(int const *, int)) \
+ PyArray_API[159])
+#define PyArray_GetPtr \
+ (*(void * (*)(PyArrayObject *, npy_intp const*)) \
+ PyArray_API[160])
+#define PyArray_CompareLists \
+ (*(int (*)(npy_intp const *, npy_intp const *, int)) \
+ PyArray_API[161])
+#define PyArray_AsCArray \
+ (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \
+ PyArray_API[162])
+#define PyArray_Free \
+ (*(int (*)(PyObject *, void *)) \
+ PyArray_API[165])
+#define PyArray_Converter \
+ (*(int (*)(PyObject *, PyObject **)) \
+ PyArray_API[166])
+#define PyArray_IntpFromSequence \
+ (*(int (*)(PyObject *, npy_intp *, int)) \
+ PyArray_API[167])
+#define PyArray_Concatenate \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[168])
+#define PyArray_InnerProduct \
+ (*(PyObject * (*)(PyObject *, PyObject *)) \
+ PyArray_API[169])
+#define PyArray_MatrixProduct \
+ (*(PyObject * (*)(PyObject *, PyObject *)) \
+ PyArray_API[170])
+#define PyArray_Correlate \
+ (*(PyObject * (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[172])
+#define PyArray_DescrConverter \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[174])
+#define PyArray_DescrConverter2 \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[175])
+#define PyArray_IntpConverter \
+ (*(int (*)(PyObject *, PyArray_Dims *)) \
+ PyArray_API[176])
+#define PyArray_BufferConverter \
+ (*(int (*)(PyObject *, PyArray_Chunk *)) \
+ PyArray_API[177])
+#define PyArray_AxisConverter \
+ (*(int (*)(PyObject *, int *)) \
+ PyArray_API[178])
+#define PyArray_BoolConverter \
+ (*(int (*)(PyObject *, npy_bool *)) \
+ PyArray_API[179])
+#define PyArray_ByteorderConverter \
+ (*(int (*)(PyObject *, char *)) \
+ PyArray_API[180])
+#define PyArray_OrderConverter \
+ (*(int (*)(PyObject *, NPY_ORDER *)) \
+ PyArray_API[181])
+#define PyArray_EquivTypes \
+ (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[182])
+#define PyArray_Zeros \
+ (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \
+ PyArray_API[183])
+#define PyArray_Empty \
+ (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \
+ PyArray_API[184])
+#define PyArray_Where \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \
+ PyArray_API[185])
+#define PyArray_Arange \
+ (*(PyObject * (*)(double, double, double, int)) \
+ PyArray_API[186])
+#define PyArray_ArangeObj \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \
+ PyArray_API[187])
+#define PyArray_SortkindConverter \
+ (*(int (*)(PyObject *, NPY_SORTKIND *)) \
+ PyArray_API[188])
+#define PyArray_LexSort \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[189])
+#define PyArray_Round \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[190])
+#define PyArray_EquivTypenums \
+ (*(unsigned char (*)(int, int)) \
+ PyArray_API[191])
+#define PyArray_RegisterDataType \
+ (*(int (*)(PyArray_DescrProto *)) \
+ PyArray_API[192])
+#define PyArray_RegisterCastFunc \
+ (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \
+ PyArray_API[193])
+#define PyArray_RegisterCanCast \
+ (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \
+ PyArray_API[194])
+#define PyArray_InitArrFuncs \
+ (*(void (*)(PyArray_ArrFuncs *)) \
+ PyArray_API[195])
+#define PyArray_IntTupleFromIntp \
+ (*(PyObject * (*)(int, npy_intp const *)) \
+ PyArray_API[196])
+#define PyArray_ClipmodeConverter \
+ (*(int (*)(PyObject *, NPY_CLIPMODE *)) \
+ PyArray_API[198])
+#define PyArray_OutputConverter \
+ (*(int (*)(PyObject *, PyArrayObject **)) \
+ PyArray_API[199])
+#define PyArray_BroadcastToShape \
+ (*(PyObject * (*)(PyObject *, npy_intp *, int)) \
+ PyArray_API[200])
+#define PyArray_DescrAlignConverter \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[203])
+#define PyArray_DescrAlignConverter2 \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[204])
+#define PyArray_SearchsideConverter \
+ (*(int (*)(PyObject *, void *)) \
+ PyArray_API[205])
+#define PyArray_CheckAxis \
+ (*(PyObject * (*)(PyArrayObject *, int *, int)) \
+ PyArray_API[206])
+#define PyArray_OverflowMultiplyList \
+ (*(npy_intp (*)(npy_intp const *, int)) \
+ PyArray_API[207])
+#define PyArray_MultiIterFromObjects \
+ (*(PyObject* (*)(PyObject **, int, int, ...)) \
+ PyArray_API[209])
+#define PyArray_GetEndianness \
+ (*(int (*)(void)) \
+ PyArray_API[210])
+#define PyArray_GetNDArrayCFeatureVersion \
+ (*(unsigned int (*)(void)) \
+ PyArray_API[211])
+#define PyArray_Correlate2 \
+ (*(PyObject * (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[212])
+#define PyArray_NeighborhoodIterNew \
+ (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \
+ PyArray_API[213])
+#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214])
+#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215])
+#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216])
+#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217])
+#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218])
+
+#if NPY_FEATURE_VERSION >= NPY_2_3_API_VERSION
+#define NpyIter_GetTransferFlags \
+ (*(NPY_ARRAYMETHOD_FLAGS (*)(NpyIter *)) \
+ PyArray_API[223])
+#endif
+#define NpyIter_New \
+ (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \
+ PyArray_API[224])
+#define NpyIter_MultiNew \
+ (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \
+ PyArray_API[225])
+#define NpyIter_AdvancedNew \
+ (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \
+ PyArray_API[226])
+#define NpyIter_Copy \
+ (*(NpyIter * (*)(NpyIter *)) \
+ PyArray_API[227])
+#define NpyIter_Deallocate \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[228])
+#define NpyIter_HasDelayedBufAlloc \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[229])
+#define NpyIter_HasExternalLoop \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[230])
+#define NpyIter_EnableExternalLoop \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[231])
+#define NpyIter_GetInnerStrideArray \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[232])
+#define NpyIter_GetInnerLoopSizePtr \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[233])
+#define NpyIter_Reset \
+ (*(int (*)(NpyIter *, char **)) \
+ PyArray_API[234])
+#define NpyIter_ResetBasePointers \
+ (*(int (*)(NpyIter *, char **, char **)) \
+ PyArray_API[235])
+#define NpyIter_ResetToIterIndexRange \
+ (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \
+ PyArray_API[236])
+#define NpyIter_GetNDim \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[237])
+#define NpyIter_GetNOp \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[238])
+#define NpyIter_GetIterNext \
+ (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \
+ PyArray_API[239])
+#define NpyIter_GetIterSize \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[240])
+#define NpyIter_GetIterIndexRange \
+ (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \
+ PyArray_API[241])
+#define NpyIter_GetIterIndex \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[242])
+#define NpyIter_GotoIterIndex \
+ (*(int (*)(NpyIter *, npy_intp)) \
+ PyArray_API[243])
+#define NpyIter_HasMultiIndex \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[244])
+#define NpyIter_GetShape \
+ (*(int (*)(NpyIter *, npy_intp *)) \
+ PyArray_API[245])
+#define NpyIter_GetGetMultiIndex \
+ (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \
+ PyArray_API[246])
+#define NpyIter_GotoMultiIndex \
+ (*(int (*)(NpyIter *, npy_intp const *)) \
+ PyArray_API[247])
+#define NpyIter_RemoveMultiIndex \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[248])
+#define NpyIter_HasIndex \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[249])
+#define NpyIter_IsBuffered \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[250])
+#define NpyIter_IsGrowInner \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[251])
+#define NpyIter_GetBufferSize \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[252])
+#define NpyIter_GetIndexPtr \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[253])
+#define NpyIter_GotoIndex \
+ (*(int (*)(NpyIter *, npy_intp)) \
+ PyArray_API[254])
+#define NpyIter_GetDataPtrArray \
+ (*(char ** (*)(NpyIter *)) \
+ PyArray_API[255])
+#define NpyIter_GetDescrArray \
+ (*(PyArray_Descr ** (*)(NpyIter *)) \
+ PyArray_API[256])
+#define NpyIter_GetOperandArray \
+ (*(PyArrayObject ** (*)(NpyIter *)) \
+ PyArray_API[257])
+#define NpyIter_GetIterView \
+ (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \
+ PyArray_API[258])
+#define NpyIter_GetReadFlags \
+ (*(void (*)(NpyIter *, char *)) \
+ PyArray_API[259])
+#define NpyIter_GetWriteFlags \
+ (*(void (*)(NpyIter *, char *)) \
+ PyArray_API[260])
+#define NpyIter_DebugPrint \
+ (*(void (*)(NpyIter *)) \
+ PyArray_API[261])
+#define NpyIter_IterationNeedsAPI \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[262])
+#define NpyIter_GetInnerFixedStrideArray \
+ (*(void (*)(NpyIter *, npy_intp *)) \
+ PyArray_API[263])
+#define NpyIter_RemoveAxis \
+ (*(int (*)(NpyIter *, int)) \
+ PyArray_API[264])
+#define NpyIter_GetAxisStrideArray \
+ (*(npy_intp * (*)(NpyIter *, int)) \
+ PyArray_API[265])
+#define NpyIter_RequiresBuffering \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[266])
+#define NpyIter_GetInitialDataPtrArray \
+ (*(char ** (*)(NpyIter *)) \
+ PyArray_API[267])
+#define NpyIter_CreateCompatibleStrides \
+ (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \
+ PyArray_API[268])
+#define PyArray_CastingConverter \
+ (*(int (*)(PyObject *, NPY_CASTING *)) \
+ PyArray_API[269])
+#define PyArray_CountNonzero \
+ (*(npy_intp (*)(PyArrayObject *)) \
+ PyArray_API[270])
+#define PyArray_PromoteTypes \
+ (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[271])
+#define PyArray_MinScalarType \
+ (*(PyArray_Descr * (*)(PyArrayObject *)) \
+ PyArray_API[272])
+#define PyArray_ResultType \
+ (*(PyArray_Descr * (*)(npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[])) \
+ PyArray_API[273])
+#define PyArray_CanCastArrayTo \
+ (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \
+ PyArray_API[274])
+#define PyArray_CanCastTypeTo \
+ (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \
+ PyArray_API[275])
+#define PyArray_EinsteinSum \
+ (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \
+ PyArray_API[276])
+#define PyArray_NewLikeArray \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \
+ PyArray_API[277])
+#define PyArray_ConvertClipmodeSequence \
+ (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \
+ PyArray_API[279])
+#define PyArray_MatrixProduct2 \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \
+ PyArray_API[280])
+#define NpyIter_IsFirstVisit \
+ (*(npy_bool (*)(NpyIter *, int)) \
+ PyArray_API[281])
+#define PyArray_SetBaseObject \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[282])
+#define PyArray_CreateSortedStridePerm \
+ (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \
+ PyArray_API[283])
+#define PyArray_RemoveAxesInPlace \
+ (*(void (*)(PyArrayObject *, const npy_bool *)) \
+ PyArray_API[284])
+#define PyArray_DebugPrint \
+ (*(void (*)(PyArrayObject *)) \
+ PyArray_API[285])
+#define PyArray_FailUnlessWriteable \
+ (*(int (*)(PyArrayObject *, const char *)) \
+ PyArray_API[286])
+#define PyArray_SetUpdateIfCopyBase \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[287])
+#define PyDataMem_NEW \
+ (*(void * (*)(size_t)) \
+ PyArray_API[288])
+#define PyDataMem_FREE \
+ (*(void (*)(void *)) \
+ PyArray_API[289])
+#define PyDataMem_RENEW \
+ (*(void * (*)(void *, size_t)) \
+ PyArray_API[290])
+#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292])
+#define PyArray_Partition \
+ (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \
+ PyArray_API[296])
+#define PyArray_ArgPartition \
+ (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \
+ PyArray_API[297])
+#define PyArray_SelectkindConverter \
+ (*(int (*)(PyObject *, NPY_SELECTKIND *)) \
+ PyArray_API[298])
+#define PyDataMem_NEW_ZEROED \
+ (*(void * (*)(size_t, size_t)) \
+ PyArray_API[299])
+#define PyArray_CheckAnyScalarExact \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[300])
+#define PyArray_ResolveWritebackIfCopy \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[302])
+#define PyArray_SetWritebackIfCopyBase \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[303])
+
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+#define PyDataMem_SetHandler \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[304])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+#define PyDataMem_GetHandler \
+ (*(PyObject * (*)(void)) \
+ PyArray_API[305])
+#endif
+#define PyDataMem_DefaultHandler (*(PyObject* *)PyArray_API[306])
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyDatetime_ConvertDatetime64ToDatetimeStruct \
+ (*(int (*)(PyArray_DatetimeMetaData *, npy_datetime, npy_datetimestruct *)) \
+ PyArray_API[307])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyDatetime_ConvertDatetimeStructToDatetime64 \
+ (*(int (*)(PyArray_DatetimeMetaData *, const npy_datetimestruct *, npy_datetime *)) \
+ PyArray_API[308])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyDatetime_ConvertPyDateTimeToDatetimeStruct \
+ (*(int (*)(PyObject *, npy_datetimestruct *, NPY_DATETIMEUNIT *, int)) \
+ PyArray_API[309])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyDatetime_GetDatetimeISO8601StrLen \
+ (*(int (*)(int, NPY_DATETIMEUNIT)) \
+ PyArray_API[310])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyDatetime_MakeISO8601Datetime \
+ (*(int (*)(npy_datetimestruct *, char *, npy_intp, int, int, NPY_DATETIMEUNIT, int, NPY_CASTING)) \
+ PyArray_API[311])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyDatetime_ParseISO8601Datetime \
+ (*(int (*)(char const *, Py_ssize_t, NPY_DATETIMEUNIT, NPY_CASTING, npy_datetimestruct *, NPY_DATETIMEUNIT *, npy_bool *)) \
+ PyArray_API[312])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyString_load \
+ (*(int (*)(npy_string_allocator *, const npy_packed_static_string *, npy_static_string *)) \
+ PyArray_API[313])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyString_pack \
+ (*(int (*)(npy_string_allocator *, npy_packed_static_string *, const char *, size_t)) \
+ PyArray_API[314])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyString_pack_null \
+ (*(int (*)(npy_string_allocator *, npy_packed_static_string *)) \
+ PyArray_API[315])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyString_acquire_allocator \
+ (*(npy_string_allocator * (*)(const PyArray_StringDTypeObject *)) \
+ PyArray_API[316])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyString_acquire_allocators \
+ (*(void (*)(size_t, PyArray_Descr *const descrs[], npy_string_allocator *allocators[])) \
+ PyArray_API[317])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyString_release_allocator \
+ (*(void (*)(npy_string_allocator *)) \
+ PyArray_API[318])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define NpyString_release_allocators \
+ (*(void (*)(size_t, npy_string_allocator *allocators[])) \
+ PyArray_API[319])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyArray_GetDefaultDescr \
+ (*(PyArray_Descr * (*)(PyArray_DTypeMeta *)) \
+ PyArray_API[361])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyArrayInitDTypeMeta_FromSpec \
+ (*(int (*)(PyArray_DTypeMeta *, PyArrayDTypeMeta_Spec *)) \
+ PyArray_API[362])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyArray_CommonDType \
+ (*(PyArray_DTypeMeta * (*)(PyArray_DTypeMeta *, PyArray_DTypeMeta *)) \
+ PyArray_API[363])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyArray_PromoteDTypeSequence \
+ (*(PyArray_DTypeMeta * (*)(npy_intp, PyArray_DTypeMeta **)) \
+ PyArray_API[364])
+#endif
+#define _PyDataType_GetArrFuncs \
+ (*(PyArray_ArrFuncs * (*)(const PyArray_Descr *)) \
+ PyArray_API[365])
+
+/*
+ * The DType classes are inconvenient for the Python generation, so they are
+ * exposed manually in the header below (this may be moved).
+ */
+#include "numpy/_public_dtype_api_table.h"
+
+#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
+static int
+_import_array(void)
+{
+ int st;
+ PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath");
+ PyObject *c_api;
+ if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) {
+ PyErr_Clear();
+ numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+ }
+
+ if (numpy == NULL) {
+ return -1;
+ }
+
+ c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
+ Py_DECREF(numpy);
+ if (c_api == NULL) {
+ return -1;
+ }
+
+ if (!PyCapsule_CheckExact(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+ Py_DECREF(c_api);
+ if (PyArray_API == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
+ return -1;
+ }
+
+ /*
+ * On exceedingly few platforms these sizes may not match, in which case
+ * we do not support older NumPy versions at all. Query the runtime
+ * feature version directly here, since PyArray_RUNTIME_VERSION is only
+ * assigned further below.
+ */
+ if (sizeof(Py_ssize_t) != sizeof(Py_intptr_t) &&
+ (int)PyArray_GetNDArrayCFeatureVersion() < NPY_2_0_API_VERSION) {
+ PyErr_Format(PyExc_RuntimeError,
+ "module compiled against NumPy 2.0 but running on NumPy 1.x. "
+ "Unfortunately, this is not supported on niche platforms where "
+ "`sizeof(size_t) != sizeof(intptr_t)`.");
+ return -1;
+ }
+ /*
+ * Perform runtime check of C API version. As of now NumPy 2.0 is ABI
+ * backwards compatible (in the exposed feature subset!) for all practical
+ * purposes.
+ */
+ if (NPY_VERSION < PyArray_GetNDArrayCVersion()) {
+ PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+ "ABI version 0x%x but this version of numpy is 0x%x", \
+ (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
+ return -1;
+ }
+ PyArray_RUNTIME_VERSION = (int)PyArray_GetNDArrayCFeatureVersion();
+ if (NPY_FEATURE_VERSION > PyArray_RUNTIME_VERSION) {
+ PyErr_Format(PyExc_RuntimeError,
+ "module was compiled against NumPy C-API version 0x%x "
+ "(NumPy " NPY_FEATURE_VERSION_STRING ") "
+ "but the running NumPy has C-API version 0x%x. "
+ "Check the section C-API incompatibility at the "
+ "Troubleshooting ImportError section at "
+ "https://numpy.org/devdocs/user/troubleshooting-importerror.html"
+ "#c-api-incompatibility "
+ "for indications on how to solve this problem.",
+ (int)NPY_FEATURE_VERSION, PyArray_RUNTIME_VERSION);
+ return -1;
+ }
+
+ /*
+ * Perform runtime check of endianness and check it matches the one set by
+ * the headers (npy_endian.h) as a safeguard
+ */
+ st = PyArray_GetEndianness();
+ if (st == NPY_CPU_UNKNOWN_ENDIAN) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as unknown endian");
+ return -1;
+ }
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+ if (st != NPY_CPU_BIG) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as big endian, but "
+ "detected different endianness at runtime");
+ return -1;
+ }
+#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+ if (st != NPY_CPU_LITTLE) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as little endian, but "
+ "detected different endianness at runtime");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+#define import_array() { \
+ if (_import_array() < 0) { \
+ PyErr_Print(); \
+ PyErr_SetString( \
+ PyExc_ImportError, \
+ "numpy._core.multiarray failed to import" \
+ ); \
+ return NULL; \
+ } \
+}
+
+#define import_array1(ret) { \
+ if (_import_array() < 0) { \
+ PyErr_Print(); \
+ PyErr_SetString( \
+ PyExc_ImportError, \
+ "numpy._core.multiarray failed to import" \
+ ); \
+ return ret; \
+ } \
+}
+
+#define import_array2(msg, ret) { \
+ if (_import_array() < 0) { \
+ PyErr_Print(); \
+ PyErr_SetString(PyExc_ImportError, msg); \
+ return ret; \
+ } \
+}
+
+#endif
+
+#endif
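For orientation, this generated header is the consumer side of the array API: an extension module must call `import_array()` from its module-init function before using any `PyArray_*` name, because every macro above dereferences the `PyArray_API` table that `_import_array()` fills in. A minimal sketch of such a module (the name `examplemod` and the method are hypothetical, not part of the vendored file):

    #define PY_SSIZE_T_CLEAN
    #include <Python.h>
    #include <numpy/arrayobject.h>

    /* A trivial method that only works once PyArray_API is populated. */
    static PyObject *
    is_array(PyObject *self, PyObject *arg)
    {
        return PyBool_FromLong(PyArray_Check(arg));
    }

    static PyMethodDef methods[] = {
        {"is_array", is_array, METH_O, "True if the argument is an ndarray."},
        {NULL, NULL, 0, NULL}
    };

    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT, "examplemod", NULL, -1, methods
    };

    PyMODINIT_FUNC
    PyInit_examplemod(void)
    {
        PyObject *m = PyModule_Create(&moduledef);
        if (m == NULL) {
            return NULL;
        }
        import_array();  /* expands to the macro above; returns NULL on failure */
        return m;
    }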
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.c b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.c
new file mode 100644
index 00000000..10fcbc45
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.c
@@ -0,0 +1,54 @@
+
+/* These pointers will be stored in the C-object for use in other
+ extension modules
+*/
+
+void *PyUFunc_API[] = {
+ (void *) &PyUFunc_Type,
+ (void *) PyUFunc_FromFuncAndData,
+ (void *) PyUFunc_RegisterLoopForType,
+ NULL,
+ (void *) PyUFunc_f_f_As_d_d,
+ (void *) PyUFunc_d_d,
+ (void *) PyUFunc_f_f,
+ (void *) PyUFunc_g_g,
+ (void *) PyUFunc_F_F_As_D_D,
+ (void *) PyUFunc_F_F,
+ (void *) PyUFunc_D_D,
+ (void *) PyUFunc_G_G,
+ (void *) PyUFunc_O_O,
+ (void *) PyUFunc_ff_f_As_dd_d,
+ (void *) PyUFunc_ff_f,
+ (void *) PyUFunc_dd_d,
+ (void *) PyUFunc_gg_g,
+ (void *) PyUFunc_FF_F_As_DD_D,
+ (void *) PyUFunc_DD_D,
+ (void *) PyUFunc_FF_F,
+ (void *) PyUFunc_GG_G,
+ (void *) PyUFunc_OO_O,
+ (void *) PyUFunc_O_O_method,
+ (void *) PyUFunc_OO_O_method,
+ (void *) PyUFunc_On_Om,
+ NULL,
+ NULL,
+ (void *) PyUFunc_clearfperr,
+ (void *) PyUFunc_getfperr,
+ NULL,
+ (void *) PyUFunc_ReplaceLoopBySignature,
+ (void *) PyUFunc_FromFuncAndDataAndSignature,
+ NULL,
+ (void *) PyUFunc_e_e,
+ (void *) PyUFunc_e_e_As_f_f,
+ (void *) PyUFunc_e_e_As_d_d,
+ (void *) PyUFunc_ee_e,
+ (void *) PyUFunc_ee_e_As_ff_f,
+ (void *) PyUFunc_ee_e_As_dd_d,
+ (void *) PyUFunc_DefaultTypeResolver,
+ (void *) PyUFunc_ValidateCasting,
+ (void *) PyUFunc_RegisterLoopForDescr,
+ (void *) PyUFunc_FromFuncAndDataAndSignatureAndIdentity,
+ (void *) PyUFunc_AddLoopFromSpec,
+ (void *) PyUFunc_AddPromoter,
+ (void *) PyUFunc_AddWrappingLoop,
+ (void *) PyUFunc_GiveFloatingpointErrors
+};
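This table is the producer side of the mechanism: inside `_multiarray_umath`, `PyUFunc_API` is wrapped in a `PyCapsule` and attached to the module object, which the consumer-side `_import_umath()` in the header that follows unpacks again. A rough sketch of that export step (assumed shape; names illustrative, error handling elided):

    /* Inside the _multiarray_umath module initialization (sketch): */
    PyObject *c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL);
    if (c_api == NULL || PyModule_AddObject(module, "_UFUNC_API", c_api) < 0) {
        /* propagate the error */
    }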
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.h
new file mode 100644
index 00000000..b05dce34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/__ufunc_api.h
@@ -0,0 +1,341 @@
+
+#ifdef _UMATHMODULE
+
+extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
+
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \
+ (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int);
+NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \
+ (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *);
+NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_d_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_f_f \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_g_g \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_F_F \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_D_D \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_G_G \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_O_O \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ff_f \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_dd_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_gg_g \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_DD_D \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_FF_F \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_GG_G \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_OO_O \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_O_O_method \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_OO_O_method \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_On_Om \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_clearfperr \
+ (void);
+NPY_NO_EXPORT int PyUFunc_getfperr \
+ (void);
+NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \
+ (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *);
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *);
+NPY_NO_EXPORT void PyUFunc_e_e \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \
+ (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyUFunc_ValidateCasting \
+ (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *);
+NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \
+ (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *);
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
+ (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *);
+NPY_NO_EXPORT int PyUFunc_AddLoopFromSpec \
+ (PyObject *, PyArrayMethod_Spec *);
+NPY_NO_EXPORT int PyUFunc_AddPromoter \
+ (PyObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT int PyUFunc_AddWrappingLoop \
+ (PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *);
+NPY_NO_EXPORT int PyUFunc_GiveFloatingpointErrors \
+ (const char *, int);
+
+#else
+
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
+#endif
+
+/* By default do not export API in an .so (was never the case on Windows) */
+#ifndef NPY_API_SYMBOL_ATTRIBUTE
+ #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
+extern NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API;
+#else
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API;
+#else
+static void **PyUFunc_API=NULL;
+#endif
+#endif
+
+#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0])
+#define PyUFunc_FromFuncAndData \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int)) \
+ PyUFunc_API[1])
+#define PyUFunc_RegisterLoopForType \
+ (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \
+ PyUFunc_API[2])
+#define PyUFunc_f_f_As_d_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[4])
+#define PyUFunc_d_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[5])
+#define PyUFunc_f_f \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[6])
+#define PyUFunc_g_g \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[7])
+#define PyUFunc_F_F_As_D_D \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[8])
+#define PyUFunc_F_F \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[9])
+#define PyUFunc_D_D \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[10])
+#define PyUFunc_G_G \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[11])
+#define PyUFunc_O_O \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[12])
+#define PyUFunc_ff_f_As_dd_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[13])
+#define PyUFunc_ff_f \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[14])
+#define PyUFunc_dd_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[15])
+#define PyUFunc_gg_g \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[16])
+#define PyUFunc_FF_F_As_DD_D \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[17])
+#define PyUFunc_DD_D \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[18])
+#define PyUFunc_FF_F \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[19])
+#define PyUFunc_GG_G \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[20])
+#define PyUFunc_OO_O \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[21])
+#define PyUFunc_O_O_method \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[22])
+#define PyUFunc_OO_O_method \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[23])
+#define PyUFunc_On_Om \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[24])
+#define PyUFunc_clearfperr \
+ (*(void (*)(void)) \
+ PyUFunc_API[27])
+#define PyUFunc_getfperr \
+ (*(int (*)(void)) \
+ PyUFunc_API[28])
+#define PyUFunc_ReplaceLoopBySignature \
+ (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \
+ PyUFunc_API[30])
+#define PyUFunc_FromFuncAndDataAndSignature \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *)) \
+ PyUFunc_API[31])
+#define PyUFunc_e_e \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[33])
+#define PyUFunc_e_e_As_f_f \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[34])
+#define PyUFunc_e_e_As_d_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[35])
+#define PyUFunc_ee_e \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[36])
+#define PyUFunc_ee_e_As_ff_f \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[37])
+#define PyUFunc_ee_e_As_dd_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[38])
+#define PyUFunc_DefaultTypeResolver \
+ (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \
+ PyUFunc_API[39])
+#define PyUFunc_ValidateCasting \
+ (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *)) \
+ PyUFunc_API[40])
+#define PyUFunc_RegisterLoopForDescr \
+ (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \
+ PyUFunc_API[41])
+
+#if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
+#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \
+ PyUFunc_API[42])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyUFunc_AddLoopFromSpec \
+ (*(int (*)(PyObject *, PyArrayMethod_Spec *)) \
+ PyUFunc_API[43])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyUFunc_AddPromoter \
+ (*(int (*)(PyObject *, PyObject *, PyObject *)) \
+ PyUFunc_API[44])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyUFunc_AddWrappingLoop \
+ (*(int (*)(PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *)) \
+ PyUFunc_API[45])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+#define PyUFunc_GiveFloatingpointErrors \
+ (*(int (*)(const char *, int)) \
+ PyUFunc_API[46])
+#endif
+
+static inline int
+_import_umath(void)
+{
+ PyObject *c_api;
+ PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath");
+ if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) {
+ PyErr_Clear();
+ numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+ }
+
+ if (numpy == NULL) {
+ PyErr_SetString(PyExc_ImportError,
+ "_multiarray_umath failed to import");
+ return -1;
+ }
+
+ c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
+ Py_DECREF(numpy);
+ if (c_api == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
+ return -1;
+ }
+
+ if (!PyCapsule_CheckExact(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+ Py_DECREF(c_api);
+ if (PyUFunc_API == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
+ return -1;
+ }
+ return 0;
+}
+
+#define import_umath() \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy._core.umath failed to import");\
+ return NULL;\
+ }\
+ } while(0)
+
+#define import_umath1(ret) \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy._core.umath failed to import");\
+ return ret;\
+ }\
+ } while(0)
+
+#define import_umath2(ret, msg) \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError, msg);\
+ return ret;\
+ }\
+ } while(0)
+
+#define import_ufunc() \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy._core.umath failed to import");\
+ }\
+ } while(0)
+
+
+static inline int
+PyUFunc_ImportUFuncAPI(void)
+{
+ if (NPY_UNLIKELY(PyUFunc_API == NULL)) {
+ import_umath1(-1);
+ }
+ return 0;
+}
+
+#endif
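As a usage illustration (not part of the vendored header): after `import_array()` and `import_umath()` have run, a module can register a simple elementwise ufunc through `PyUFunc_FromFuncAndData`. The loop, names, and docstring below are hypothetical:

    #include <numpy/ndarraytypes.h>
    #include <numpy/ufuncobject.h>

    /* Inner loop: squares a strided block of doubles. */
    static void
    double_square_loop(char **args, npy_intp const *dimensions,
                       npy_intp const *steps, void *data)
    {
        char *in = args[0], *out = args[1];
        for (npy_intp i = 0; i < dimensions[0]; i++) {
            double x = *(double *)in;
            *(double *)out = x * x;
            in += steps[0];
            out += steps[1];
        }
    }

    static PyUFuncGenericFunction loops[] = {&double_square_loop};
    static void *loop_data[] = {NULL};
    static char loop_types[] = {NPY_DOUBLE, NPY_DOUBLE};  /* 1 in, 1 out */

    /* Call from module init, after import_array() and import_umath(). */
    static int
    add_square_ufunc(PyObject *module)
    {
        PyObject *square = PyUFunc_FromFuncAndData(
                loops, loop_data, loop_types, 1 /* ntypes */, 1 /* nin */,
                1 /* nout */, PyUFunc_None, "square", "square(x) -> x*x", 0);
        if (square == NULL) {
            return -1;
        }
        return PyModule_AddObject(module, "square", square);
    }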
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h
new file mode 100644
index 00000000..b365cb50
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h
@@ -0,0 +1,90 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+#error You should not include this header directly
+#endif
+/*
+ * Private API (here for inline)
+ */
+static inline int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
+
+/*
+ * Update to next item of the iterator
+ *
+ * Note: this simply increments the coordinates vector, with the last
+ * dimension incremented first, i.e., for dimension 3
+ * ...
+ * -1, -1, -1
+ * -1, -1, 0
+ * -1, -1, 1
+ * ....
+ * -1, 0, -1
+ * -1, 0, 0
+ * ....
+ * 0, -1, -1
+ * 0, -1, 0
+ * ....
+ */
+#define _UPDATE_COORD_ITER(c) \
+ wb = iter->coordinates[c] < iter->bounds[c][1]; \
+ if (wb) { \
+ iter->coordinates[c] += 1; \
+ return 0; \
+ } \
+ else { \
+ iter->coordinates[c] = iter->bounds[c][0]; \
+ }
+
+static inline int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp i, wb;
+
+ for (i = iter->nd - 1; i >= 0; --i) {
+ _UPDATE_COORD_ITER(i)
+ }
+
+ return 0;
+}
+
+/*
+ * Version optimized for 2d arrays, manual loop unrolling
+ */
+static inline int
+_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp wb;
+
+ _UPDATE_COORD_ITER(1)
+ _UPDATE_COORD_ITER(0)
+
+ return 0;
+}
+#undef _UPDATE_COORD_ITER
+
+/*
+ * Advance to the next neighbour
+ */
+static inline int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
+{
+ _PyArrayNeighborhoodIter_IncrCoord(iter);
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+ return 0;
+}
+
+/*
+ * Reset functions
+ */
+static inline int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp i;
+
+ for (i = 0; i < iter->nd; ++i) {
+ iter->coordinates[i] = iter->bounds[i][0];
+ }
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+ return 0;
+}
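A sketch of how these inline helpers combine with `PyArray_NeighborhoodIterNew` from the multiarray API table: the snippet below visits the 3x3 zero-padded neighborhood of every element (illustrative; assumes `arr` is a 2-d NPY_DOUBLE array and omits error handling and cleanup):

    /* Base iterator over the array, and a 3x3 neighborhood around it. */
    PyArrayIterObject *iter =
            (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr);
    npy_intp bounds[] = {-1, 1, -1, 1};   /* one [lo, hi] pair per axis */
    PyArrayNeighborhoodIterObject *neigh =
            (PyArrayNeighborhoodIterObject *)PyArray_NeighborhoodIterNew(
                    iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL);

    for (npy_intp i = 0; i < iter->size; i++) {
        for (npy_intp j = 0; j < neigh->size; j++) {
            double v = *(double *)neigh->dataptr;   /* current neighbour */
            /* ... accumulate v ... */
            PyArrayNeighborhoodIter_Next(neigh);
        }
        PyArray_ITER_NEXT(iter);
        PyArrayNeighborhoodIter_Reset(neigh);
    }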
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_numpyconfig.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_numpyconfig.h
new file mode 100644
index 00000000..16a4b443
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_numpyconfig.h
@@ -0,0 +1,33 @@
+#define NPY_HAVE_ENDIAN_H 1
+
+#define NPY_SIZEOF_SHORT 2
+#define NPY_SIZEOF_INT 4
+#define NPY_SIZEOF_LONG 8
+#define NPY_SIZEOF_FLOAT 4
+#define NPY_SIZEOF_COMPLEX_FLOAT 8
+#define NPY_SIZEOF_DOUBLE 8
+#define NPY_SIZEOF_COMPLEX_DOUBLE 16
+#define NPY_SIZEOF_LONGDOUBLE 16
+#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+#define NPY_SIZEOF_PY_INTPTR_T 8
+#define NPY_SIZEOF_INTP 8
+#define NPY_SIZEOF_UINTP 8
+#define NPY_SIZEOF_WCHAR_T 4
+#define NPY_SIZEOF_OFF_T 8
+#define NPY_SIZEOF_PY_LONG_LONG 8
+#define NPY_SIZEOF_LONGLONG 8
+
+/*
+ * Defined to 1 or 0. Note that Pyodide hardcodes NPY_NO_SMP (and other defines
+ * in this header) for better cross-compilation, so don't rename them without a
+ * good reason.
+ */
+#define NPY_NO_SMP 0
+
+#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
+#define NPY_ABI_VERSION 0x02000000
+#define NPY_API_VERSION 0x00000014
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS 1
+#endif
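Since these values are baked in when the headers are generated, a consumer can cross-check them against the toolchain it is actually compiled with; a minimal sketch, assuming a C11 compiler:

    #include <assert.h>   /* static_assert (C11) */

    static_assert(sizeof(long) == NPY_SIZEOF_LONG,
                  "toolchain disagrees with _numpyconfig.h about long");
    static_assert(sizeof(double) == NPY_SIZEOF_DOUBLE,
                  "toolchain disagrees with _numpyconfig.h about double");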
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_public_dtype_api_table.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_public_dtype_api_table.h
new file mode 100644
index 00000000..51f39054
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/_public_dtype_api_table.h
@@ -0,0 +1,86 @@
+/*
+ * Public exposure of the DType Classes. These are tricky to expose
+ * via the Python API, so they are exposed through this header for now.
+ *
+ * These definitions are only relevant for the public API and we reserve
+ * the slots 320-360 in the API table generation for this (currently).
+ *
+ * TODO: This file should be consolidated with the API table generation
+ * (although not sure the current generation is worth preserving).
+ */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_
+#define NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_
+
+#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+
+/* All of these require NumPy 2.0 support */
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+
+/*
+ * The type of the DType metaclass
+ */
+#define PyArrayDTypeMeta_Type (*(PyTypeObject *)(PyArray_API + 320)[0])
+/*
+ * NumPy's builtin DTypes:
+ */
+#define PyArray_BoolDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[1])
+/* Integers */
+#define PyArray_ByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[2])
+#define PyArray_UByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[3])
+#define PyArray_ShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[4])
+#define PyArray_UShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[5])
+#define PyArray_IntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[6])
+#define PyArray_UIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[7])
+#define PyArray_LongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[8])
+#define PyArray_ULongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[9])
+#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[10])
+#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[11])
+/* Integer aliases */
+#define PyArray_Int8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[12])
+#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[13])
+#define PyArray_Int16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[14])
+#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[15])
+#define PyArray_Int32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[16])
+#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[17])
+#define PyArray_Int64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[18])
+#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[19])
+#define PyArray_IntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[20])
+#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[21])
+/* Floats */
+#define PyArray_HalfDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[22])
+#define PyArray_FloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[23])
+#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[24])
+#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[25])
+/* Complex */
+#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[26])
+#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[27])
+#define PyArray_CLongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[28])
+/* String/Bytes */
+#define PyArray_BytesDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[29])
+#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[30])
+/* Datetime/Timedelta */
+#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[31])
+#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[32])
+/* Object/Void */
+#define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33])
+#define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34])
+/* Python types (used as markers for scalars) */
+#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35])
+#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36])
+#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37])
+/* Default integer type */
+#define PyArray_DefaultIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[38])
+/* New non-legacy DTypes follow in the order they were added */
+#define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39])
+
+/* NOTE: offset 40 is free */
+
+/* Need to start with a larger offset again for the abstract classes: */
+#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366])
+#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367])
+#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368])
+
+#endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */
+
+#endif /* NPY_INTERNAL_BUILD */
+#endif /* NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ */
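Once `import_array()` has populated `PyArray_API`, the macros above behave like ordinary `PyArray_DTypeMeta` globals. A small sketch of their use together with `PyArray_CommonDType` (requires building against NumPy >= 2.0; error handling elided):

    /* Ask NumPy for the common DType of int64 and float64. */
    PyArray_DTypeMeta *common = PyArray_CommonDType(
            &PyArray_Int64DType, &PyArray_DoubleDType);
    if (common == NULL) {
        /* an exception is set */
    }
    /* `common` is expected to be PyArray_DoubleDType here. */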
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayobject.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayobject.h
new file mode 100644
index 00000000..97d93590
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayobject.h
@@ -0,0 +1,7 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_
+#define Py_ARRAYOBJECT_H
+
+#include "ndarrayobject.h"
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayscalars.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayscalars.h
new file mode 100644
index 00000000..ff048061
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/arrayscalars.h
@@ -0,0 +1,196 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_
+
+#ifndef _MULTIARRAYMODULE
+typedef struct {
+ PyObject_HEAD
+ npy_bool obval;
+} PyBoolScalarObject;
+#endif
+
+
+typedef struct {
+ PyObject_HEAD
+ signed char obval;
+} PyByteScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ short obval;
+} PyShortScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ int obval;
+} PyIntScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ long obval;
+} PyLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_longlong obval;
+} PyLongLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned char obval;
+} PyUByteScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned short obval;
+} PyUShortScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned int obval;
+} PyUIntScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned long obval;
+} PyULongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_ulonglong obval;
+} PyULongLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_half obval;
+} PyHalfScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ float obval;
+} PyFloatScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ double obval;
+} PyDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_longdouble obval;
+} PyLongDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_cfloat obval;
+} PyCFloatScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_cdouble obval;
+} PyCDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_clongdouble obval;
+} PyCLongDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ PyObject * obval;
+} PyObjectScalarObject;
+
+typedef struct {
+ PyObject_HEAD
+ npy_datetime obval;
+ PyArray_DatetimeMetaData obmeta;
+} PyDatetimeScalarObject;
+
+typedef struct {
+ PyObject_HEAD
+ npy_timedelta obval;
+ PyArray_DatetimeMetaData obmeta;
+} PyTimedeltaScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ char obval;
+} PyScalarObject;
+
+#define PyStringScalarObject PyBytesObject
+#ifndef Py_LIMITED_API
+typedef struct {
+ /* note that the PyObject_HEAD macro lives right here */
+ PyUnicodeObject base;
+ Py_UCS4 *obval;
+ #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
+ char *buffer_fmt;
+ #endif
+} PyUnicodeScalarObject;
+#endif
+
+
+typedef struct {
+ PyObject_VAR_HEAD
+ char *obval;
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+ /* Internally use the subclass to allow accessing names/fields */
+ _PyArray_LegacyDescr *descr;
+#else
+ PyArray_Descr *descr;
+#endif
+ int flags;
+ PyObject *base;
+ #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
+ void *_buffer_info; /* private buffer info, tagged to allow warning */
+ #endif
+} PyVoidScalarObject;
+
+/* Macros
+ PyScalarObject
+ PyArrType_Type
+ are defined in ndarrayobject.h
+*/
+
+#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0])))
+#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
+#define PyArrayScalar_FromLong(i) \
+ ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
+#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \
+ return Py_INCREF(PyArrayScalar_FromLong(i)), \
+ PyArrayScalar_FromLong(i)
+#define PyArrayScalar_RETURN_FALSE \
+ return Py_INCREF(PyArrayScalar_False), \
+ PyArrayScalar_False
+#define PyArrayScalar_RETURN_TRUE \
+ return Py_INCREF(PyArrayScalar_True), \
+ PyArrayScalar_True
+
+#define PyArrayScalar_New(cls) \
+ Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0)
+#ifndef Py_LIMITED_API
+/* For the limited API, use PyArray_ScalarAsCtype instead */
+#define PyArrayScalar_VAL(obj, cls) \
+ ((Py##cls##ScalarObject *)obj)->obval
+#define PyArrayScalar_ASSIGN(obj, cls, val) \
+ PyArrayScalar_VAL(obj, cls) = val
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */
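As a usage note (not part of the vendored header): `PyArrayScalar_VAL` gives direct access to the `obval` field of the structs above. A sketch for a float64 scalar, assuming `obj` has already been type-checked (under `Py_LIMITED_API`, use `PyArray_ScalarAsCtype` instead, as the comment above says):

    #include <numpy/arrayobject.h>
    #include <numpy/arrayscalars.h>

    /* Expands to ((PyDoubleScalarObject *)obj)->obval */
    static double
    float64_value(PyObject *obj)
    {
        return PyArrayScalar_VAL(obj, Double);
    }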
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/dtype_api.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/dtype_api.h
new file mode 100644
index 00000000..b37c9fbb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/dtype_api.h
@@ -0,0 +1,480 @@
+/*
+ * The public DType API
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
+
+struct PyArrayMethodObject_tag;
+
+/*
+ * Largely opaque struct for DType classes (i.e. metaclass instances).
+ * The internal definition is currently in `ndarraytypes.h` (export is a bit
+ * more complex because `PyArray_Descr` is a DTypeMeta internally but not
+ * externally).
+ */
+#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+
+#ifndef Py_LIMITED_API
+
+ typedef struct PyArray_DTypeMeta_tag {
+ PyHeapTypeObject super;
+
+ /*
+ * Most DTypes will have a singleton default instance, for the
+ * parametric legacy DTypes (bytes, string, void, datetime) this
+ * may be a pointer to the *prototype* instance?
+ */
+ PyArray_Descr *singleton;
+ /* Copy of the legacy DType's type number, usually invalid. */
+ int type_num;
+
+ /* The type object of the scalar instances (may be NULL?) */
+ PyTypeObject *scalar_type;
+ /*
+ * DType flags to signal legacy, parametric, or
+ * abstract. But plenty of space for additional information/flags.
+ */
+ npy_uint64 flags;
+
+ /*
+ * Use indirection in order to allow a fixed size for this struct.
+ * A stable ABI size makes creating a static DType less painful
+ * while also ensuring flexibility for all opaque API (with one
+ * indirection due to the pointer lookup).
+ */
+ void *dt_slots;
+ /* Allow growing (at the moment also beyond this) */
+ void *reserved[3];
+ } PyArray_DTypeMeta;
+
+#else
+
+typedef PyTypeObject PyArray_DTypeMeta;
+
+#endif /* Py_LIMITED_API */
+
+#endif /* not internal build */
+
+/*
+ * ******************************************************
+ * ArrayMethod API (Casting and UFuncs)
+ * ******************************************************
+ */
+
+
+typedef enum {
+ /* Flag for whether the GIL is required */
+ NPY_METH_REQUIRES_PYAPI = 1 << 0,
+ /*
+ * Some functions cannot set floating point error flags, this flag
+ * gives us the option (not requirement) to skip floating point error
+ * setup/check. No function should set error flags and ignore them
+ * since it would interfere with chaining operations (e.g. casting).
+ */
+ NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1,
+ /* Whether the method supports unaligned access (not runtime) */
+ NPY_METH_SUPPORTS_UNALIGNED = 1 << 2,
+ /*
+ * Used for reductions to allow reordering the operation. At this point
+ * assume that if set, it also applies to normal operations though!
+ */
+ NPY_METH_IS_REORDERABLE = 1 << 3,
+ /*
+ * Private flag for now for *logic* functions. The logical functions
+ * `logical_or` and `logical_and` can always cast the inputs to booleans
+ * "safely" (because that is how the cast to bool is defined).
+ * @seberg: I am not sure this is the best way to handle this, so it is
+ * private for now (also it is very limited anyway).
+ * There is one "exception". NA aware dtypes cannot cast to bool
+ * (hopefully), so the `??->?` loop should error even with this flag.
+ * But a second NA fallback loop will be necessary.
+ */
+ _NPY_METH_FORCE_CAST_INPUTS = 1 << 17,
+
+ /* All flags which can change at runtime */
+ NPY_METH_RUNTIME_FLAGS = (
+ NPY_METH_REQUIRES_PYAPI |
+ NPY_METH_NO_FLOATINGPOINT_ERRORS),
+} NPY_ARRAYMETHOD_FLAGS;
+
+
+typedef struct PyArrayMethod_Context_tag {
+ /* The caller, which is typically the original ufunc. May be NULL */
+ PyObject *caller;
+ /* The method "self". Currently an opaque object. */
+ struct PyArrayMethodObject_tag *method;
+
+ /* Operand descriptors, filled in by resolve_descriptors */
+ PyArray_Descr *const *descriptors;
+ /* Structure may grow (this is harmless for DType authors) */
+} PyArrayMethod_Context;
+
+
+/*
+ * The main object for creating a new ArrayMethod. We use the typical `slots`
+ * mechanism used by the Python limited API (see below for the slot defs).
+ */
+typedef struct {
+ const char *name;
+ int nin, nout;
+ NPY_CASTING casting;
+ NPY_ARRAYMETHOD_FLAGS flags;
+ PyArray_DTypeMeta **dtypes;
+ PyType_Slot *slots;
+} PyArrayMethod_Spec;
+
+
+/*
+ * ArrayMethod slots
+ * -----------------
+ *
+ * Slot IDs for ArrayMethod creation. Once fully public, the IDs are fixed,
+ * but they can be deprecated and arbitrarily extended.
+ */
+#define _NPY_METH_resolve_descriptors_with_scalars 1
+#define NPY_METH_resolve_descriptors 2
+#define NPY_METH_get_loop 3
+#define NPY_METH_get_reduction_initial 4
+/* specific loops for constructions/default get_loop: */
+#define NPY_METH_strided_loop 5
+#define NPY_METH_contiguous_loop 6
+#define NPY_METH_unaligned_strided_loop 7
+#define NPY_METH_unaligned_contiguous_loop 8
+#define NPY_METH_contiguous_indexed_loop 9
+#define _NPY_METH_static_data 10
+
+
+/*
+ * The resolve descriptors function must be able to handle NULL values for
+ * all output (but not input) `given_descrs` and fill `loop_descrs`.
+ * Return -1 on error, or 0 if the operation is not possible (in which case
+ * no error is set). (This may still be in flux.)
+ * Otherwise it must return the "casting safety"; for normal functions this
+ * is almost always "safe" (or even "equivalent"?).
+ *
+ * `resolve_descriptors` is optional if all output DTypes are non-parametric.
+ */
+typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)(
+ /* "method" is currently opaque (necessary e.g. to wrap Python) */
+ struct PyArrayMethodObject_tag *method,
+ /* DTypes the method was created for */
+ PyArray_DTypeMeta *const *dtypes,
+ /* Input descriptors (instances). Outputs may be NULL. */
+ PyArray_Descr *const *given_descrs,
+ /* Exact loop descriptors to use, must not hold references on error */
+ PyArray_Descr **loop_descrs,
+ npy_intp *view_offset);
+
+
+/*
+ * Rarely needed, slightly more powerful version of `resolve_descriptors`.
+ * See also `PyArrayMethod_ResolveDescriptors` for details on shared arguments.
+ *
+ * NOTE: This function is private now as it is unclear how and what to pass
+ * exactly as additional information to allow dealing with the scalars.
+ * See also gh-24915.
+ */
+typedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)(
+ struct PyArrayMethodObject_tag *method,
+ PyArray_DTypeMeta *const *dtypes,
+ /* Unlike above, these can have any DType and we may allow NULL. */
+ PyArray_Descr *const *given_descrs,
+ /*
+ * Input scalars or NULL. Only ever passed for python scalars.
+ * WARNING: In some cases, a loop may be explicitly selected and the
+ * value passed is not available (NULL) or does not have the
+ * expected type.
+ */
+ PyObject *const *input_scalars,
+ PyArray_Descr **loop_descrs,
+ npy_intp *view_offset);
+
+
+
+typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context,
+ char *const *data, const npy_intp *dimensions, const npy_intp *strides,
+ NpyAuxData *transferdata);
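Putting the pieces so far together: a hypothetical sketch of a unary double->double ArrayMethod, wiring a `resolve_descriptors` and a strided loop into a `PyArrayMethod_Spec` via the slot IDs above (e.g. for use with `PyUFunc_AddLoopFromSpec`; all names are illustrative, and `twice_dtypes` must be filled at runtime):

    static NPY_CASTING
    twice_resolve_descriptors(struct PyArrayMethodObject_tag *method,
                              PyArray_DTypeMeta *const *dtypes,
                              PyArray_Descr *const *given_descrs,
                              PyArray_Descr **loop_descrs,
                              npy_intp *view_offset)
    {
        /* Non-parametric double in/out: use the DTypes' default descriptors */
        for (int i = 0; i < 2; i++) {
            loop_descrs[i] = PyArray_GetDefaultDescr(dtypes[i]);
            if (loop_descrs[i] == NULL) {
                /* must not hold references on error */
                while (--i >= 0) {
                    Py_DECREF(loop_descrs[i]);
                }
                return (NPY_CASTING)-1;
            }
        }
        return NPY_NO_CASTING;
    }

    static int
    twice_strided_loop(PyArrayMethod_Context *context, char *const *data,
                       const npy_intp *dimensions, const npy_intp *strides,
                       NpyAuxData *transferdata)
    {
        char *in = data[0], *out = data[1];
        for (npy_intp i = 0; i < dimensions[0]; i++) {
            *(double *)out = 2.0 * *(double *)in;
            in += strides[0];
            out += strides[1];
        }
        return 0;
    }

    static PyType_Slot twice_slots[] = {
        {NPY_METH_resolve_descriptors, &twice_resolve_descriptors},
        {NPY_METH_strided_loop, &twice_strided_loop},
        {0, NULL}
    };

    /* Filled at runtime with &PyArray_DoubleDType (twice). */
    static PyArray_DTypeMeta *twice_dtypes[2];

    static PyArrayMethod_Spec twice_spec = {
        .name = "double_twice",
        .nin = 1,
        .nout = 1,
        .casting = NPY_NO_CASTING,
        .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS,
        .dtypes = twice_dtypes,
        .slots = twice_slots,
    };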
+
+
+typedef int (PyArrayMethod_GetLoop)(
+ PyArrayMethod_Context *context,
+ int aligned, int move_references,
+ const npy_intp *strides,
+ PyArrayMethod_StridedLoop **out_loop,
+ NpyAuxData **out_transferdata,
+ NPY_ARRAYMETHOD_FLAGS *flags);
+
+/**
+ * Query an ArrayMethod for the initial value for use in reduction.
+ *
+ * @param context The arraymethod context, mainly to access the descriptors.
+ * @param reduction_is_empty Whether the reduction is empty. When it is, the
+ * value returned may differ. In this case it is a "default" value that
+ * may differ from the "identity" value normally used. For example:
+ * - `0.0` is the default for `sum([])`. But `-0.0` is the correct
+ * identity otherwise as it preserves the sign for `sum([-0.0])`.
+ * - We use no identity for object, but return the default of `0` and `1`
+ * for the empty `sum([], dtype=object)` and `prod([], dtype=object)`.
+ * This allows `np.sum(np.array(["a", "b"], dtype=object))` to work.
+ * - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN`
+ * is not a good *default* when there are no items.
+ * @param initial Pointer to initial data to be filled (if possible)
+ *
+ * @returns -1, 0, or 1 indicating error, no initial value, and initial being
+ * successfully filled. Errors must not be raised where 0 is the correct
+ * answer, since NumPy may call this even when not strictly necessary.
+ */
+typedef int (PyArrayMethod_GetReductionInitial)(
+ PyArrayMethod_Context *context, npy_bool reduction_is_empty,
+ void *initial);
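A hypothetical implementation for a double sum, following the contract spelled out above (0.0 as the default for the empty case, -0.0 as the sign-preserving identity otherwise):

    static int
    sum_get_reduction_initial(PyArrayMethod_Context *context,
                              npy_bool reduction_is_empty, void *initial)
    {
        /* default 0.0 for sum([]); -0.0 keeps the sign for sum([-0.0]) */
        *(double *)initial = reduction_is_empty ? 0.0 : -0.0;
        return 1;   /* initial was successfully filled */
    }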
+
+/*
+ * The following functions are only used by the wrapping array method defined
+ * in umath/wrapping_array_method.c
+ */
+
+
+/*
+ * The function converts the given descriptors (passed in to
+ * `resolve_descriptors`) and translates them for the wrapped loop.
+ * The new descriptors MUST be viewable with the old ones, `NULL` must be
+ * supported (for outputs) and should normally be forwarded.
+ *
+ * The function must clean up on error.
+ *
+ * NOTE: We currently assume that this translation gives "viewable" results.
+ * I.e. there is no additional casting related to the wrapping process.
+ * In principle that could be supported, but it is not clear it would be useful.
+ * This currently also means that e.g. alignment must apply identically
+ * to the new dtypes.
+ *
+ * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast`
+ * there is no way to "pass out" the result of this function. This means
+ * it will be called twice for every ufunc call.
+ * (I am considering including `auxdata` as an "optional" parameter to
+ * `resolve_descriptors`, so that it can be filled there if not NULL.)
+ */
+typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout,
+ PyArray_DTypeMeta *const wrapped_dtypes[],
+ PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]);
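+
+/*
+ * Sketch, with invented names: for a hypothetical "unit" dtype wrapping
+ * float64, translate each given unit descriptor to the descriptor the
+ * wrapped float64 loop should see, forwarding NULL outputs unchanged.
+ */
+typedef struct {
+    PyArray_Descr base;
+    PyArray_Descr *wrapped;  /* the underlying float64 descriptor */
+} example_unit_descr;
+
+static int
+example_translate_given_descrs(int nin, int nout,
+        PyArray_DTypeMeta *const wrapped_dtypes[],
+        PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[])
+{
+    (void)wrapped_dtypes;
+    for (int i = 0; i < nin + nout; i++) {
+        if (given_descrs[i] == NULL) {
+            new_descrs[i] = NULL;  /* outputs may be NULL; forward them */
+        }
+        else {
+            example_unit_descr *d = (example_unit_descr *)given_descrs[i];
+            Py_INCREF(d->wrapped);
+            new_descrs[i] = d->wrapped;  /* must be viewable with the original */
+        }
+    }
+    return 0;
+}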
+
+/**
+ * The function to convert the actual loop descriptors (as returned by the
+ * original `resolve_descriptors` function) to the ones the output array
+ * should use.
+ * This function must return "viewable" types; it must not mutate them in any
+ * form that would break the inner-loop logic. Does not need to support NULL.
+ *
+ * The function must clean up on error.
+ *
+ * @param nin Number of input arguments
+ * @param nout Number of output arguments
+ * @param new_dtypes The DTypes of the output (usually not needed)
+ * @param given_descrs Original given_descrs to the resolver, necessary to
+ * fetch any information related to the new dtypes from the original.
+ * @param original_descrs The `loop_descrs` returned by the wrapped loop.
+ * @param loop_descrs The output descriptors, compatible to `original_descrs`.
+ *
+ * @returns 0 on success, -1 on failure.
+ */
+typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout,
+ PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[],
+ PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);
+
+
+
+/*
+ * A traverse loop working on a single array. This is similar to the general
+ * strided-loop function. This is designed for loops that need to visit every
+ * element of a single array.
+ *
+ * Currently this is used for array clearing, via the NPY_DT_get_clear_loop
+ * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook.
+ * These are most useful for handling arrays storing embedded references to
+ * python objects or heap-allocated data.
+ *
+ * The `void *traverse_context` is passed in because we may need to pass in
+ * Interpreter state or similar in the future, but we don't want to pass in
+ * a full context (with pointers to dtypes, method, caller which all make
+ * no sense for a traverse function).
+ *
+ * We assume for now that this context can be just passed through in the
+ * future (for structured dtypes).
+ *
+ */
+typedef int (PyArrayMethod_TraverseLoop)(
+ void *traverse_context, const PyArray_Descr *descr, char *data,
+ npy_intp size, npy_intp stride, NpyAuxData *auxdata);
+
+
+/*
+ * Simplified get_loop function specific to dtype traversal
+ *
+ * It should set the flags needed for the traversal loop and set out_loop to the
+ * loop function, which must be a valid PyArrayMethod_TraverseLoop
+ * pointer. Currently this is used for zero-filling and clearing arrays storing
+ * embedded references.
+ *
+ */
+typedef int (PyArrayMethod_GetTraverseLoop)(
+ void *traverse_context, const PyArray_Descr *descr,
+ int aligned, npy_intp fixed_stride,
+ PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata,
+ NPY_ARRAYMETHOD_FLAGS *flags);
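+
+/*
+ * Sketch (hypothetical): a clear loop for a dtype whose elements embed an
+ * owned `PyObject *`, plus the get-function that selects it.
+ */
+static int
+example_clear_objects_loop(void *traverse_context,
+        const PyArray_Descr *descr, char *data, npy_intp size,
+        npy_intp stride, NpyAuxData *auxdata)
+{
+    (void)traverse_context; (void)descr; (void)auxdata;
+    for (npy_intp i = 0; i < size; i++, data += stride) {
+        PyObject *item;
+        memcpy(&item, data, sizeof(item));  /* data may be unaligned */
+        Py_XDECREF(item);
+        memset(data, 0, sizeof(item));
+    }
+    return 0;
+}
+
+static int
+example_get_clear_loop(void *traverse_context, const PyArray_Descr *descr,
+        int aligned, npy_intp fixed_stride,
+        PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata,
+        NPY_ARRAYMETHOD_FLAGS *flags)
+{
+    (void)traverse_context; (void)descr; (void)aligned; (void)fixed_stride;
+    *out_loop = &example_clear_objects_loop;
+    *out_auxdata = NULL;
+    *flags = NPY_METH_REQUIRES_PYAPI;  /* Py_XDECREF needs the GIL */
+    return 0;
+}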
+
+
+/*
+ * Type of the C promoter function, which must be wrapped into a
+ * PyCapsule with name "numpy._ufunc_promoter".
+ *
+ * Note that currently the output dtypes are always NULL unless they are
+ * also part of the signature. This is an implementation detail and could
+ * change in the future. However, in general promoters should not have a
+ * need for output dtypes.
+ * (There are potential use-cases, these are currently unsupported.)
+ */
+typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,
+ PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
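+
+/*
+ * Sketch (hypothetical) for a binary ufunc (nin=2, nout=1): promote both
+ * inputs to their common DType and let the output follow, honoring any
+ * DTypes fixed in the signature. Uses the public PyArray_CommonDType.
+ */
+static int
+example_promoter(PyObject *ufunc, PyArray_DTypeMeta *const op_dtypes[],
+        PyArray_DTypeMeta *const signature[],
+        PyArray_DTypeMeta *new_op_dtypes[])
+{
+    (void)ufunc;
+    PyArray_DTypeMeta *common = PyArray_CommonDType(op_dtypes[0], op_dtypes[1]);
+    if (common == NULL) {
+        return -1;
+    }
+    for (int i = 0; i < 3; i++) {
+        PyArray_DTypeMeta *dt = signature[i] != NULL ? signature[i] : common;
+        Py_INCREF(dt);
+        new_op_dtypes[i] = dt;
+    }
+    Py_DECREF(common);
+    return 0;
+}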
+
+/*
+ * ****************************
+ * DTYPE API
+ * ****************************
+ */
+
+#define NPY_DT_ABSTRACT 1 << 1
+#define NPY_DT_PARAMETRIC 1 << 2
+#define NPY_DT_NUMERIC 1 << 3
+
+/*
+ * These correspond to slots in the NPY_DType_Slots struct and must
+ * be in the same order as the members of that struct. If new slots
+ * get added or old slots get removed, NPY_NUM_DTYPE_SLOTS must also
+ * be updated.
+ */
+
+#define NPY_DT_discover_descr_from_pyobject 1
+// this slot is considered private because its API hasn't been decided
+#define _NPY_DT_is_known_scalar_type 2
+#define NPY_DT_default_descr 3
+#define NPY_DT_common_dtype 4
+#define NPY_DT_common_instance 5
+#define NPY_DT_ensure_canonical 6
+#define NPY_DT_setitem 7
+#define NPY_DT_getitem 8
+#define NPY_DT_get_clear_loop 9
+#define NPY_DT_get_fill_zero_loop 10
+#define NPY_DT_finalize_descr 11
+
+// These PyArray_ArrFuncs slots will be deprecated and replaced eventually.
+// getitem and setitem can be defined as a performance optimization;
+// by default the user dtypes call `legacy_getitem_using_DType` and
+// `legacy_setitem_using_DType`, respectively. This functionality is
+// only supported for basic NumPy DTypes.
+
+
+// used to separate dtype slots from arrfuncs slots
+// intended only for internal use but defined here for clarity
+#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10)
+
+// Cast is disabled
+// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET
+
+#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET
+
+// Copyswap is disabled
+// #define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET
+
+// Casting related slots are disabled. See
+// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163
+// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET
+
+// These are deprecated in NumPy 1.19, so are disabled here.
+// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET
+
+
+// TODO: These slots probably still need some thought, and/or a way to "grow"?
+typedef struct {
+ PyTypeObject *typeobj; /* type of python scalar or NULL */
+ int flags; /* flags, including parametric and abstract */
+ /* NULL terminated cast definitions. Use NULL for the newly created DType */
+ PyArrayMethod_Spec **casts;
+ PyType_Slot *slots;
+ /* Baseclass or NULL (will always subclass `np.dtype`) */
+ PyTypeObject *baseclass;
+} PyArrayDTypeMeta_Spec;
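+
+/*
+ * Sketch (hypothetical names): filling in a spec for a simple,
+ * non-parametric numeric DType; it would then be registered with
+ * `PyArrayInitDTypeMeta_FromSpec`. The casts and slots arrays are
+ * NULL/zero terminated.
+ */
+static PyArrayMethod_Spec *example_casts[] = {NULL};
+
+static PyType_Slot example_dtype_slots[] = {
+    /* e.g. {NPY_DT_default_descr, &example_default_descr}, */
+    {0, NULL},
+};
+
+static PyArrayDTypeMeta_Spec example_dtype_spec = {
+    NULL,                /* typeobj: Python scalar type, or NULL */
+    NPY_DT_NUMERIC,      /* flags: neither abstract nor parametric */
+    example_casts,
+    example_dtype_slots,
+    NULL,                /* baseclass: default, subclasses np.dtype */
+};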
+
+
+typedef PyArray_Descr *(PyArrayDTypeMeta_DiscoverDescrFromPyobject)(
+ PyArray_DTypeMeta *cls, PyObject *obj);
+
+/*
+ * Before making this public, we should decide whether it should pass
+ * the type, or allow looking at the object. A possible use-case:
+ * `np.array(np.array([0]), dtype=np.ndarray)`
+ * could consider arrays that are not `dtype=ndarray` to be "scalars".
+ */
+typedef int (PyArrayDTypeMeta_IsKnownScalarType)(
+ PyArray_DTypeMeta *cls, PyTypeObject *obj);
+
+typedef PyArray_Descr *(PyArrayDTypeMeta_DefaultDescriptor)(PyArray_DTypeMeta *cls);
+typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)(
+ PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
+
+
+/*
+ * Convenience utility for getting a reference to the DType metaclass associated
+ * with a dtype instance.
+ */
+#define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr))
+
+static inline PyArray_DTypeMeta *
+NPY_DT_NewRef(PyArray_DTypeMeta *o) {
+ Py_INCREF((PyObject *)o);
+ return o;
+}
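+
+/* Usage sketch: an owned reference to the DType class of a descriptor. */
+static inline PyArray_DTypeMeta *
+example_owned_dtype_class(PyArray_Descr *descr)
+{
+    return NPY_DT_NewRef(NPY_DTYPE(descr));
+}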
+
+
+typedef PyArray_Descr *(PyArrayDTypeMeta_CommonInstance)(
+ PyArray_Descr *dtype1, PyArray_Descr *dtype2);
+typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype);
+/*
+ * Returns either a new reference to *dtype* or a new descriptor instance
+ * initialized with the same parameters as *dtype*. The caller cannot know
+ * which choice a dtype will make. This function is called just before the
+ * array buffer is created for a newly created array; it is not called for
+ * views, and the descriptor returned by this function is attached to the array.
+ */
+typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype);
+
+/*
+ * TODO: These two functions are currently only used for experimental DType
+ * API support. Their relation should be "reversed": NumPy should
+ * always use them internally.
+ * There are open points about "casting safety" though, e.g. setting
+ * elements is currently always unsafe.
+ */
+typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *);
+typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *);
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/halffloat.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/halffloat.h
new file mode 100644
index 00000000..95040166
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/halffloat.h
@@ -0,0 +1,70 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_
+
+#include <Python.h>
+#include <numpy/npy_math.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Half-precision routines
+ */
+
+/* Conversions */
+float npy_half_to_float(npy_half h);
+double npy_half_to_double(npy_half h);
+npy_half npy_float_to_half(float f);
+npy_half npy_double_to_half(double d);
+/* Comparisons */
+int npy_half_eq(npy_half h1, npy_half h2);
+int npy_half_ne(npy_half h1, npy_half h2);
+int npy_half_le(npy_half h1, npy_half h2);
+int npy_half_lt(npy_half h1, npy_half h2);
+int npy_half_ge(npy_half h1, npy_half h2);
+int npy_half_gt(npy_half h1, npy_half h2);
+/* faster *_nonan variants for when you know h1 and h2 are not NaN */
+int npy_half_eq_nonan(npy_half h1, npy_half h2);
+int npy_half_lt_nonan(npy_half h1, npy_half h2);
+int npy_half_le_nonan(npy_half h1, npy_half h2);
+/* Miscellaneous functions */
+int npy_half_iszero(npy_half h);
+int npy_half_isnan(npy_half h);
+int npy_half_isinf(npy_half h);
+int npy_half_isfinite(npy_half h);
+int npy_half_signbit(npy_half h);
+npy_half npy_half_copysign(npy_half x, npy_half y);
+npy_half npy_half_spacing(npy_half h);
+npy_half npy_half_nextafter(npy_half x, npy_half y);
+npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);
+
+/*
+ * Half-precision constants
+ */
+
+#define NPY_HALF_ZERO (0x0000u)
+#define NPY_HALF_PZERO (0x0000u)
+#define NPY_HALF_NZERO (0x8000u)
+#define NPY_HALF_ONE (0x3c00u)
+#define NPY_HALF_NEGONE (0xbc00u)
+#define NPY_HALF_PINF (0x7c00u)
+#define NPY_HALF_NINF (0xfc00u)
+#define NPY_HALF_NAN (0x7e00u)
+
+#define NPY_MAX_HALF (0x7bffu)
+
+/*
+ * Bit-level conversions
+ */
+
+npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
+npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
+npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
+npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);
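+
+/*
+ * Usage sketch (not part of the API): round-trip floats through npy_half.
+ * npy_half is a 16-bit bit pattern, so C comparison operators on it would
+ * compare bit patterns, not numeric values -- use the functions above.
+ */
+static inline int
+example_half_less_than(float a, float b)
+{
+    npy_half ha = npy_float_to_half(a);
+    npy_half hb = npy_float_to_half(b);
+    if (npy_half_isnan(ha) || npy_half_isnan(hb)) {
+        return 0;  /* comparisons involving NaN are always false */
+    }
+    return npy_half_lt_nonan(ha, hb);
+}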
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarrayobject.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarrayobject.h
new file mode 100644
index 00000000..f06bafe5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarrayobject.h
@@ -0,0 +1,304 @@
+/*
+ * DON'T INCLUDE THIS DIRECTLY.
+ */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "ndarraytypes.h"
+#include "dtype_api.h"
+
+/* Includes the "function" C-API -- these are all stored in a
+   list of pointers -- one for each file.
+   The two lists are concatenated into one in multiarray.
+
+   They become available by calling import_array().
+*/
+
+#include "__multiarray_api.h"
+
+/*
+ * Include any definitions which are defined differently for 1.x and 2.x
+ * (Symbols only available on 2.x are not there, but rather guarded.)
+ */
+#include "npy_2_compat.h"
+
+/* C-API that requires previous API to be defined */
+
+#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
+
+#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
+#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
+
+#define PyArray_HasArrayInterfaceType(op, type, context, out) \
+ ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \
+ (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \
+ (((out)=PyArray_FromArrayAttr(op, type, context)) != \
+ Py_NotImplemented))
+
+#define PyArray_HasArrayInterface(op, out) \
+ PyArray_HasArrayInterfaceType(op, NULL, NULL, out)
+
+#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \
+ (PyArray_NDIM((PyArrayObject *)op) == 0))
+
+#define PyArray_IsScalar(obj, cls) \
+ (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))
+
+#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
+ PyArray_IsZeroDim(m))
+#define PyArray_IsPythonNumber(obj) \
+ (PyFloat_Check(obj) || PyComplex_Check(obj) || \
+ PyLong_Check(obj) || PyBool_Check(obj))
+#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \
+ || PyArray_IsScalar((obj), Integer))
+#define PyArray_IsPythonScalar(obj) \
+ (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \
+ PyUnicode_Check(obj))
+
+#define PyArray_IsAnyScalar(obj) \
+ (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
+
+#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \
+ PyArray_CheckScalar(obj))
+
+
+#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \
+ Py_INCREF(m), (m) : \
+ (PyArrayObject *)(PyArray_Copy(m)))
+
+#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \
+ PyArray_CompareLists(PyArray_DIMS(a1), \
+ PyArray_DIMS(a2), \
+ PyArray_NDIM(a1)))
+
+#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
+#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
+#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)
+
+#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \
+ NULL)
+
+#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \
+ PyArray_DescrFromType(type), 0, 0, 0, NULL)
+
+#define PyArray_FROM_OTF(m, type, flags) \
+ PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
+ (((flags) & NPY_ARRAY_ENSURECOPY) ? \
+ ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL)
+
+#define PyArray_FROMANY(m, type, min, max, flags) \
+ PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \
+ (((flags) & NPY_ARRAY_ENSURECOPY) ? \
+ (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL)
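+
+/*
+ * Usage sketch: coerce an arbitrary Python object into an aligned,
+ * C-contiguous, read-only float64 array (returns NULL with an exception
+ * set on failure).
+ */
+static inline PyArrayObject *
+example_as_float64_array(PyObject *obj)
+{
+    return (PyArrayObject *)PyArray_FROM_OTF(obj, NPY_DOUBLE,
+                                             NPY_ARRAY_IN_ARRAY);
+}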
+
+#define PyArray_ZEROS(m, dims, type, is_f_order) \
+ PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order)
+
+#define PyArray_EMPTY(m, dims, type, is_f_order) \
+ PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order)
+
+#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \
+ PyArray_NBYTES(obj))
+
+#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_DEFAULT, NULL)
+
+#define PyArray_EquivArrTypes(a1, a2) \
+ PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))
+
+#define PyArray_EquivByteorders(b1, b2) \
+ (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))
+
+#define PyArray_SimpleNew(nd, dims, typenum) \
+ PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)
+
+#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \
+ PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \
+ data, 0, NPY_ARRAY_CARRAY, NULL)
+
+#define PyArray_SimpleNewFromDescr(nd, dims, descr) \
+ PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \
+ NULL, NULL, 0, NULL)
+
+#define PyArray_ToScalar(data, arr) \
+ PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr)
+
+
+/* These might be faster without the dereferencing of obj
+   going on inside -- of course an optimizing compiler should
+   inline the constants inside a for loop, making it a moot point.
+*/
+
+#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0]))
+
+#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1]))
+
+#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1] + \
+ (k)*PyArray_STRIDES(obj)[2]))
+
+#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1] + \
+ (k)*PyArray_STRIDES(obj)[2] + \
+ (l)*PyArray_STRIDES(obj)[3]))
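+
+/*
+ * Usage sketch: sum a 2-D NPY_DOUBLE array with GETPTR2. This assumes the
+ * caller has already checked that the array is 2-D and holds doubles.
+ */
+static inline double
+example_sum_2d(PyArrayObject *arr)
+{
+    double total = 0.0;
+    for (npy_intp i = 0; i < PyArray_DIM(arr, 0); i++) {
+        for (npy_intp j = 0; j < PyArray_DIM(arr, 1); j++) {
+            total += *(double *)PyArray_GETPTR2(arr, i, j);
+        }
+    }
+    return total;
+}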
+
+static inline void
+PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
+{
+ PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
+ if (fa && fa->base) {
+ if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) {
+ PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
+ Py_DECREF(fa->base);
+ fa->base = NULL;
+ PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
+ }
+ }
+}
+
+#define PyArray_DESCR_REPLACE(descr) do { \
+ PyArray_Descr *_new_; \
+ _new_ = PyArray_DescrNew(descr); \
+ Py_XDECREF(descr); \
+ descr = _new_; \
+ } while(0)
+
+/* Copy should always return contiguous array */
+#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER)
+
+#define PyArray_FromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_BEHAVED | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_DEFAULT | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_ENSURECOPY | \
+ NPY_ARRAY_DEFAULT | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_Cast(mp, type_num) \
+ PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)
+
+#define PyArray_Take(ap, items, axis) \
+ PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE)
+
+#define PyArray_Put(ap, items, values) \
+ PyArray_PutTo(ap, items, values, NPY_RAISE)
+
+
+/*
+ Check to see if this key in the dictionary is the "title"
+ entry of the tuple (i.e. a duplicate dictionary entry in the fields
+ dict).
+*/
+
+static inline int
+NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
+{
+ PyObject *title;
+ if (PyTuple_Size(value) != 3) {
+ return 0;
+ }
+ title = PyTuple_GetItem(value, 2);
+ if (key == title) {
+ return 1;
+ }
+#ifdef PYPY_VERSION
+ /*
+ * On PyPy, dictionary keys do not always preserve object identity.
+ * Fall back to comparison by value.
+ */
+ if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
+ return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
+ }
+#endif
+ return 0;
+}
+
+/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */
+#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))
+
+#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
+#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
+
+
+/*
+ * These macros and functions unfortunately require runtime version checks
+ * that are only defined in `npy_2_compat.h`. For that reason they cannot be
+ * part of `ndarraytypes.h`, which tries to be self-contained.
+ */
+
+static inline npy_intp
+PyArray_ITEMSIZE(const PyArrayObject *arr)
+{
+ return PyDataType_ELSIZE(((PyArrayObject_fields *)arr)->descr);
+}
+
+#define PyDataType_HASFIELDS(obj) (PyDataType_ISLEGACY((PyArray_Descr*)(obj)) && PyDataType_NAMES((PyArray_Descr*)(obj)) != NULL)
+#define PyDataType_HASSUBARRAY(dtype) (PyDataType_ISLEGACY(dtype) && PyDataType_SUBARRAY(dtype) != NULL)
+#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \
+ !PyDataType_HASFIELDS(dtype))
+
+#define PyDataType_FLAGCHK(dtype, flag) \
+ ((PyDataType_FLAGS(dtype) & (flag)) == (flag))
+
+#define PyDataType_REFCHK(dtype) \
+ PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)
+
+#define NPY_BEGIN_THREADS_DESCR(dtype) \
+ do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
+ NPY_BEGIN_THREADS;} while (0);
+
+#define NPY_END_THREADS_DESCR(dtype) \
+ do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
+ NPY_END_THREADS; } while (0);
+
+#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+/* The internal copy of this is now defined in `dtypemeta.h` */
+/*
+ * `PyArray_Scalar` is the same as this function but will convert
+ * most NumPy types to Python scalars.
+ */
+static inline PyObject *
+PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
+{
+ return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem(
+ (void *)itemptr, (PyArrayObject *)arr);
+}
+
+/*
+ * SETITEM should only be used if it is known that the value is a scalar
+ * and of a type understood by the array's dtype.
+ * Use `PyArray_Pack` if the value may be of a different dtype.
+ */
+static inline int
+PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)
+{
+ return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem(v, itemptr, arr);
+}
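+
+/*
+ * Usage sketch: read the first element as a Python object and write it
+ * back. The array must be non-empty, and the value's type must be
+ * understood by the array's dtype (see the note above).
+ */
+static inline int
+example_roundtrip_first_item(PyArrayObject *arr)
+{
+    PyObject *item = PyArray_GETITEM(arr, PyArray_BYTES(arr));
+    if (item == NULL) {
+        return -1;
+    }
+    int res = PyArray_SETITEM(arr, PyArray_BYTES(arr), item);
+    Py_DECREF(item);
+    return res;
+}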
+#endif /* not internal */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarraytypes.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarraytypes.h
new file mode 100644
index 00000000..baa42406
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ndarraytypes.h
@@ -0,0 +1,1950 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_
+
+#include "npy_common.h"
+#include "npy_endian.h"
+#include "npy_cpu.h"
+#include "utils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN
+
+/* Always allow threading unless it was explicitly disabled at build time */
+#if !NPY_NO_SMP
+ #define NPY_ALLOW_THREADS 1
+#else
+ #define NPY_ALLOW_THREADS 0
+#endif
+
+#ifndef __has_extension
+#define __has_extension(x) 0
+#endif
+
+/*
+ * There are several places in the code where an array of dimensions
+ * is allocated statically. This is the size of that static
+ * allocation.
+ *
+ * The array creation itself could have arbitrary dimensions but all
+ * the places where static allocation is used would need to be changed
+ * to dynamic (including inside of several structures)
+ *
+ * As of NumPy 2.0, we strongly discourage the downstream use of NPY_MAXDIMS,
+ * but since auditing everything seems a big ask, define it as 64.
+ * A future version could:
+ * - Increase or remove the limit and require recompilation (like 2.0 did)
+ * - Deprecate or remove the macro but keep the limit (at basically any time)
+ */
+#define NPY_MAXDIMS 64
+/* We cannot change this as it would break ABI: */
+#define NPY_MAXDIMS_LEGACY_ITERS 32
+/* NPY_MAXARGS is version dependent and defined in npy_2_compat.h */
+
+/* Used for Converter Functions "O&" code in ParseTuple */
+#define NPY_FAIL 0
+#define NPY_SUCCEED 1
+
+
+enum NPY_TYPES { NPY_BOOL=0,
+ NPY_BYTE, NPY_UBYTE,
+ NPY_SHORT, NPY_USHORT,
+ NPY_INT, NPY_UINT,
+ NPY_LONG, NPY_ULONG,
+ NPY_LONGLONG, NPY_ULONGLONG,
+ NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
+ NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,
+ NPY_OBJECT=17,
+ NPY_STRING, NPY_UNICODE,
+ NPY_VOID,
+ /*
+ * New 1.6 types appended, may be integrated
+ * into the above in 2.0.
+ */
+ NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF,
+
+ NPY_CHAR, /* Deprecated, will raise if used */
+
+ /* The number of *legacy* dtypes */
+ NPY_NTYPES_LEGACY=24,
+
+ /* assign a high value to avoid changing this in the
+ future when new dtypes are added */
+ NPY_NOTYPE=25,
+
+ NPY_USERDEF=256, /* leave room for characters */
+
+ /* The number of types not including the new 1.6 types */
+ NPY_NTYPES_ABI_COMPATIBLE=21,
+
+ /*
+ * New DTypes which do not share the legacy layout
+     * (added after NumPy 2.0). NPY_VSTRING is the first of these;
+     * we may open up a block for user-defined dtypes in the
+ * future.
+ */
+ NPY_VSTRING=2056,
+};
+
+
+/* basetype array priority */
+#define NPY_PRIORITY 0.0
+
+/* default subtype priority */
+#define NPY_SUBTYPE_PRIORITY 1.0
+
+/* default scalar priority */
+#define NPY_SCALAR_PRIORITY -1000000.0
+
+/* How many floating point types are there (excluding half) */
+#define NPY_NUM_FLOATTYPE 3
+
+/*
+ * These characters correspond to the array type and the struct
+ * module
+ */
+
+enum NPY_TYPECHAR {
+ NPY_BOOLLTR = '?',
+ NPY_BYTELTR = 'b',
+ NPY_UBYTELTR = 'B',
+ NPY_SHORTLTR = 'h',
+ NPY_USHORTLTR = 'H',
+ NPY_INTLTR = 'i',
+ NPY_UINTLTR = 'I',
+ NPY_LONGLTR = 'l',
+ NPY_ULONGLTR = 'L',
+ NPY_LONGLONGLTR = 'q',
+ NPY_ULONGLONGLTR = 'Q',
+ NPY_HALFLTR = 'e',
+ NPY_FLOATLTR = 'f',
+ NPY_DOUBLELTR = 'd',
+ NPY_LONGDOUBLELTR = 'g',
+ NPY_CFLOATLTR = 'F',
+ NPY_CDOUBLELTR = 'D',
+ NPY_CLONGDOUBLELTR = 'G',
+ NPY_OBJECTLTR = 'O',
+ NPY_STRINGLTR = 'S',
+ NPY_DEPRECATED_STRINGLTR2 = 'a',
+ NPY_UNICODELTR = 'U',
+ NPY_VOIDLTR = 'V',
+ NPY_DATETIMELTR = 'M',
+ NPY_TIMEDELTALTR = 'm',
+ NPY_CHARLTR = 'c',
+
+ /*
+ * New non-legacy DTypes
+ */
+ NPY_VSTRINGLTR = 'T',
+
+ /*
+ * Note, we removed `NPY_INTPLTR` due to changing its definition
+ * to 'n', rather than 'p'. On any typical platform this is the
+ * same integer. 'n' should be used for the `np.intp` with the same
+ * size as `size_t` while 'p' remains pointer sized.
+ *
+ * 'p', 'P', 'n', and 'N' are valid and defined explicitly
+ * in `arraytypes.c.src`.
+ */
+
+ /*
+     * These are for dtype 'kinds', not dtype 'typecodes'
+     * (which is what the entries above are).
+ */
+ NPY_GENBOOLLTR ='b',
+ NPY_SIGNEDLTR = 'i',
+ NPY_UNSIGNEDLTR = 'u',
+ NPY_FLOATINGLTR = 'f',
+ NPY_COMPLEXLTR = 'c',
+
+};
+
+/*
+ * Changing this may break Numpy API compatibility
+ * due to changing offsets in PyArray_ArrFuncs, so be
+ * careful. Here we have reused the mergesort slot for
+ * any kind of stable sort, the actual implementation will
+ * depend on the data type.
+ */
+typedef enum {
+ _NPY_SORT_UNDEFINED=-1,
+ NPY_QUICKSORT=0,
+ NPY_HEAPSORT=1,
+ NPY_MERGESORT=2,
+ NPY_STABLESORT=2,
+} NPY_SORTKIND;
+#define NPY_NSORTS (NPY_STABLESORT + 1)
+
+
+typedef enum {
+ NPY_INTROSELECT=0
+} NPY_SELECTKIND;
+#define NPY_NSELECTS (NPY_INTROSELECT + 1)
+
+
+typedef enum {
+ NPY_SEARCHLEFT=0,
+ NPY_SEARCHRIGHT=1
+} NPY_SEARCHSIDE;
+#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1)
+
+
+typedef enum {
+ NPY_NOSCALAR=-1,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+} NPY_SCALARKIND;
+#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1)
+
+/* For specifying array memory layout or iteration order */
+typedef enum {
+ /* Fortran order if inputs are all Fortran, C otherwise */
+ NPY_ANYORDER=-1,
+ /* C order */
+ NPY_CORDER=0,
+ /* Fortran order */
+ NPY_FORTRANORDER=1,
+ /* An order as close to the inputs as possible */
+ NPY_KEEPORDER=2
+} NPY_ORDER;
+
+/* For specifying allowed casting in operations which support it */
+typedef enum {
+ _NPY_ERROR_OCCURRED_IN_CAST = -1,
+ /* Only allow identical types */
+ NPY_NO_CASTING=0,
+ /* Allow identical and byte swapped types */
+ NPY_EQUIV_CASTING=1,
+ /* Only allow safe casts */
+ NPY_SAFE_CASTING=2,
+ /* Allow safe casts or casts within the same kind */
+ NPY_SAME_KIND_CASTING=3,
+ /* Allow any casts */
+ NPY_UNSAFE_CASTING=4,
+} NPY_CASTING;
+
+typedef enum {
+ NPY_CLIP=0,
+ NPY_WRAP=1,
+ NPY_RAISE=2
+} NPY_CLIPMODE;
+
+typedef enum {
+ NPY_VALID=0,
+ NPY_SAME=1,
+ NPY_FULL=2
+} NPY_CORRELATEMODE;
+
+/* The special not-a-time (NaT) value */
+#define NPY_DATETIME_NAT NPY_MIN_INT64
+
+/*
+ * Upper bound on the length of a DATETIME ISO 8601 string
+ * YEAR: 21 (64-bit year)
+ * MONTH: 3
+ * DAY: 3
+ * HOURS: 3
+ * MINUTES: 3
+ * SECONDS: 3
+ * ATTOSECONDS: 1 + 3*6
+ * TIMEZONE: 5
+ * NULL TERMINATOR: 1
+ */
+#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1)
+
+/* The FR in the unit names stands for frequency */
+typedef enum {
+ /* Force signed enum type, must be -1 for code compatibility */
+ NPY_FR_ERROR = -1, /* error or undetermined */
+
+ /* Start of valid units */
+ NPY_FR_Y = 0, /* Years */
+ NPY_FR_M = 1, /* Months */
+ NPY_FR_W = 2, /* Weeks */
+ /* Gap where 1.6 NPY_FR_B (value 3) was */
+ NPY_FR_D = 4, /* Days */
+ NPY_FR_h = 5, /* hours */
+ NPY_FR_m = 6, /* minutes */
+ NPY_FR_s = 7, /* seconds */
+ NPY_FR_ms = 8, /* milliseconds */
+ NPY_FR_us = 9, /* microseconds */
+ NPY_FR_ns = 10, /* nanoseconds */
+ NPY_FR_ps = 11, /* picoseconds */
+ NPY_FR_fs = 12, /* femtoseconds */
+ NPY_FR_as = 13, /* attoseconds */
+ NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */
+} NPY_DATETIMEUNIT;
+
+/*
+ * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS
+ * is technically one more than the actual number of units.
+ */
+#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1)
+#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC
+
+/*
+ * Business day conventions for mapping invalid business
+ * days to valid business days.
+ */
+typedef enum {
+ /* Go forward in time to the following business day. */
+ NPY_BUSDAY_FORWARD,
+ NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD,
+ /* Go backward in time to the preceding business day. */
+ NPY_BUSDAY_BACKWARD,
+ NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD,
+ /*
+ * Go forward in time to the following business day, unless it
+ * crosses a month boundary, in which case go backward
+ */
+ NPY_BUSDAY_MODIFIEDFOLLOWING,
+ /*
+ * Go backward in time to the preceding business day, unless it
+ * crosses a month boundary, in which case go forward.
+ */
+ NPY_BUSDAY_MODIFIEDPRECEDING,
+ /* Produce a NaT for non-business days. */
+ NPY_BUSDAY_NAT,
+ /* Raise an exception for non-business days. */
+ NPY_BUSDAY_RAISE
+} NPY_BUSDAY_ROLL;
+
+
+/************************************************************
+ * NumPy Auxiliary Data for inner loops, sort functions, etc.
+ ************************************************************/
+
+/*
+ * When creating an auxiliary data struct, this should always appear
+ * as the first member, like this:
+ *
+ * typedef struct {
+ * NpyAuxData base;
+ * double constant;
+ * } constant_multiplier_aux_data;
+ */
+typedef struct NpyAuxData_tag NpyAuxData;
+
+/* Function pointers for freeing or cloning auxiliary data */
+typedef void (NpyAuxData_FreeFunc) (NpyAuxData *);
+typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *);
+
+struct NpyAuxData_tag {
+ NpyAuxData_FreeFunc *free;
+ NpyAuxData_CloneFunc *clone;
+ /* To allow for a bit of expansion without breaking the ABI */
+ void *reserved[2];
+};
+
+/* Macros to use for freeing and cloning auxiliary data */
+#define NPY_AUXDATA_FREE(auxdata) \
+ do { \
+ if ((auxdata) != NULL) { \
+ (auxdata)->free(auxdata); \
+ } \
+ } while(0)
+#define NPY_AUXDATA_CLONE(auxdata) \
+ ((auxdata)->clone(auxdata))
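+
+/*
+ * Sketch (hypothetical): allocating the constant_multiplier_aux_data shown
+ * above, with matching free and clone functions.
+ */
+typedef struct {
+    NpyAuxData base;
+    double constant;
+} constant_multiplier_aux_data;
+
+static void
+constant_multiplier_free(NpyAuxData *auxdata)
+{
+    PyMem_RawFree(auxdata);
+}
+
+static NpyAuxData *
+constant_multiplier_clone(NpyAuxData *auxdata)
+{
+    NpyAuxData *clone = PyMem_RawMalloc(sizeof(constant_multiplier_aux_data));
+    if (clone != NULL) {
+        memcpy(clone, auxdata, sizeof(constant_multiplier_aux_data));
+    }
+    return clone;
+}
+
+static NpyAuxData *
+constant_multiplier_new(double constant)
+{
+    constant_multiplier_aux_data *aux =
+            PyMem_RawMalloc(sizeof(constant_multiplier_aux_data));
+    if (aux == NULL) {
+        return NULL;
+    }
+    aux->base.free = &constant_multiplier_free;
+    aux->base.clone = &constant_multiplier_clone;
+    aux->base.reserved[0] = NULL;
+    aux->base.reserved[1] = NULL;
+    aux->constant = constant;
+    return (NpyAuxData *)aux;
+}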
+
+#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr);
+#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr);
+
+/*
+* Macros to define how array and dimension/strides data is
+* allocated. These should be made private.
+*/
+
+#define NPY_USE_PYMEM 1
+
+
+#if NPY_USE_PYMEM == 1
+/* use the Raw versions which are safe to call with the GIL released */
+#define PyArray_malloc PyMem_RawMalloc
+#define PyArray_free PyMem_RawFree
+#define PyArray_realloc PyMem_RawRealloc
+#else
+#define PyArray_malloc malloc
+#define PyArray_free free
+#define PyArray_realloc realloc
+#endif
+
+/* Dimensions and strides */
+#define PyDimMem_NEW(size) \
+ ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp)))
+
+#define PyDimMem_FREE(ptr) PyArray_free(ptr)
+
+#define PyDimMem_RENEW(ptr,size) \
+ ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp)))
+
+/* forward declaration */
+struct _PyArray_Descr;
+
+/* These must deal with unaligned and swapped data if necessary */
+typedef PyObject * (PyArray_GetItemFunc) (void *, void *);
+typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *);
+
+typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp,
+ npy_intp, int, void *);
+
+typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *);
+typedef npy_bool (PyArray_NonzeroFunc)(void *, void *);
+
+
+/*
+ * These assume aligned and not-swapped data -- a buffer will be used
+ * beforehand, or contiguous data will be obtained
+ */
+
+typedef int (PyArray_CompareFunc)(const void *, const void *, void *);
+typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *);
+
+typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *,
+ npy_intp, void *);
+
+typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *,
+ void *);
+
+/*
+ * XXX the ignore argument should be removed next time the API version
+ * is bumped. It used to be the separator.
+ */
+typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr,
+ char *ignore, struct _PyArray_Descr *);
+typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr,
+ struct _PyArray_Descr *);
+
+typedef int (PyArray_FillFunc)(void *, npy_intp, void *);
+
+typedef int (PyArray_SortFunc)(void *, npy_intp, void *);
+typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *);
+
+typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *);
+
+typedef int (PyArray_ScalarKindFunc)(void *);
+
+typedef struct {
+ npy_intp *ptr;
+ int len;
+} PyArray_Dims;
+
+typedef struct {
+ /*
+ * Functions to cast to most other standard types
+ * Can have some NULL entries. The types
+ * DATETIME, TIMEDELTA, and HALF go into the castdict
+ * even though they are built-in.
+ */
+ PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE];
+
+ /* The next four functions *cannot* be NULL */
+
+ /*
+ * Functions to get and set items with standard Python types
+ * -- not array scalars
+ */
+ PyArray_GetItemFunc *getitem;
+ PyArray_SetItemFunc *setitem;
+
+ /*
+ * Copy and/or swap data. Memory areas may not overlap
+ * Use memmove first if they might
+ */
+ PyArray_CopySwapNFunc *copyswapn;
+ PyArray_CopySwapFunc *copyswap;
+
+ /*
+ * Function to compare items
+ * Can be NULL
+ */
+ PyArray_CompareFunc *compare;
+
+ /*
+ * Function to select largest
+ * Can be NULL
+ */
+ PyArray_ArgFunc *argmax;
+
+ /*
+ * Function to compute dot product
+ * Can be NULL
+ */
+ PyArray_DotFunc *dotfunc;
+
+ /*
+ * Function to scan an ASCII file and
+ * place a single value plus possible separator
+ * Can be NULL
+ */
+ PyArray_ScanFunc *scanfunc;
+
+ /*
+ * Function to read a single value from a string
+ * and adjust the pointer; Can be NULL
+ */
+ PyArray_FromStrFunc *fromstr;
+
+ /*
+ * Function to determine if data is zero or not
+     * If NULL, a default version is
+     * used at registration time.
+ */
+ PyArray_NonzeroFunc *nonzero;
+
+ /*
+ * Used for arange. Should return 0 on success
+ * and -1 on failure.
+ * Can be NULL.
+ */
+ PyArray_FillFunc *fill;
+
+ /*
+ * Function to fill arrays with scalar values
+ * Can be NULL
+ */
+ PyArray_FillWithScalarFunc *fillwithscalar;
+
+ /*
+ * Sorting functions
+ * Can be NULL
+ */
+ PyArray_SortFunc *sort[NPY_NSORTS];
+ PyArray_ArgSortFunc *argsort[NPY_NSORTS];
+
+ /*
+ * Dictionary of additional casting functions
+ * PyArray_VectorUnaryFuncs
+ * which can be populated to support casting
+ * to other registered types. Can be NULL
+ */
+ PyObject *castdict;
+
+ /*
+ * Functions useful for generalizing
+ * the casting rules.
+ * Can be NULL;
+ */
+ PyArray_ScalarKindFunc *scalarkind;
+ int **cancastscalarkindto;
+ int *cancastto;
+
+ void *_unused1;
+ void *_unused2;
+ void *_unused3;
+
+ /*
+ * Function to select smallest
+ * Can be NULL
+ */
+ PyArray_ArgFunc *argmin;
+
+} PyArray_ArrFuncs;
+
+
+/* The item must be reference counted when it is inserted or extracted. */
+#define NPY_ITEM_REFCOUNT 0x01
+/* Same as needing REFCOUNT */
+#define NPY_ITEM_HASOBJECT 0x01
+/* Convert to list for pickling */
+#define NPY_LIST_PICKLE 0x02
+/* The item is a POINTER */
+#define NPY_ITEM_IS_POINTER 0x04
+/* memory needs to be initialized for this data-type */
+#define NPY_NEEDS_INIT 0x08
+/* operations need Python C-API so don't give-up thread. */
+#define NPY_NEEDS_PYAPI 0x10
+/* Use f.getitem when extracting elements of this data-type */
+#define NPY_USE_GETITEM 0x20
+/* Use f.setitem when creating a 0-d array from this data-type. */
+#define NPY_USE_SETITEM 0x40
+/* A sticky flag specifically for structured arrays */
+#define NPY_ALIGNED_STRUCT 0x80
+
+/*
+ * These are inherited for the global data-type if any data-types in the
+ * fields have them.
+ */
+#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \
+ NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI)
+
+#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \
+ NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \
+ NPY_NEEDS_INIT | NPY_NEEDS_PYAPI)
+
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+/*
+ * Public version of the Descriptor struct as of 2.x
+ */
+typedef struct _PyArray_Descr {
+ PyObject_HEAD
+ /*
+ * the type object representing an
+ * instance of this type -- should not
+ * be two type_numbers with the same type
+ * object.
+ */
+ PyTypeObject *typeobj;
+ /* kind for this type */
+ char kind;
+ /* unique-character representing this type */
+ char type;
+ /*
+ * '>' (big), '<' (little), '|'
+ * (not-applicable), or '=' (native).
+ */
+ char byteorder;
+    /* Space of the former flags field (unused) to ensure type_num is stable. */
+ char _former_flags;
+ /* number representing this type */
+ int type_num;
+ /* Space for dtype instance specific flags. */
+ npy_uint64 flags;
+ /* element size (itemsize) for this type */
+ npy_intp elsize;
+ /* alignment needed for this type */
+ npy_intp alignment;
+ /* metadata dict or NULL */
+ PyObject *metadata;
+ /* Cached hash value (-1 if not yet computed). */
+ npy_hash_t hash;
+ /* Unused slot (must be initialized to NULL) for future use */
+ void *reserved_null[2];
+} PyArray_Descr;
+
+#else /* 1.x and 2.x compatible version (only shared fields): */
+
+typedef struct _PyArray_Descr {
+ PyObject_HEAD
+ PyTypeObject *typeobj;
+ char kind;
+ char type;
+ char byteorder;
+ char _former_flags;
+ int type_num;
+} PyArray_Descr;
+
+/* To access modified fields, define the full 2.0 struct: */
+typedef struct {
+ PyObject_HEAD
+ PyTypeObject *typeobj;
+ char kind;
+ char type;
+ char byteorder;
+ char _former_flags;
+ int type_num;
+ npy_uint64 flags;
+ npy_intp elsize;
+ npy_intp alignment;
+ PyObject *metadata;
+ npy_hash_t hash;
+ void *reserved_null[2];
+} _PyArray_DescrNumPy2;
+
+#endif /* 1.x and 2.x compatible version */
+
+/*
+ * Semi-private struct with the additional fields of legacy descriptors (must
+ * check NPY_DT_is_legacy before casting/accessing). The struct is also not
+ * valid when running on 1.x (i.e. in public API use).
+ */
+typedef struct {
+ PyObject_HEAD
+ PyTypeObject *typeobj;
+ char kind;
+ char type;
+ char byteorder;
+ char _former_flags;
+ int type_num;
+ npy_uint64 flags;
+ npy_intp elsize;
+ npy_intp alignment;
+ PyObject *metadata;
+ npy_hash_t hash;
+ void *reserved_null[2];
+ struct _arr_descr *subarray;
+ PyObject *fields;
+ PyObject *names;
+ NpyAuxData *c_metadata;
+} _PyArray_LegacyDescr;
+
+
+/*
+ * Unmodified PyArray_Descr struct identical to NumPy 1.x. This struct is
+ * used as a prototype for registering a new legacy DType.
+ * It is also used to access the fields in user code running on 1.x.
+ */
+typedef struct {
+ PyObject_HEAD
+ PyTypeObject *typeobj;
+ char kind;
+ char type;
+ char byteorder;
+ char flags;
+ int type_num;
+ int elsize;
+ int alignment;
+ struct _arr_descr *subarray;
+ PyObject *fields;
+ PyObject *names;
+ PyArray_ArrFuncs *f;
+ PyObject *metadata;
+ NpyAuxData *c_metadata;
+ npy_hash_t hash;
+} PyArray_DescrProto;
+
+
+typedef struct _arr_descr {
+ PyArray_Descr *base;
+ PyObject *shape; /* a tuple */
+} PyArray_ArrayDescr;
+
+/*
+ * Memory handler structure for array data.
+ */
+/* The declaration of free differs from PyMemAllocatorEx */
+typedef struct {
+ void *ctx;
+ void* (*malloc) (void *ctx, size_t size);
+ void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
+ void* (*realloc) (void *ctx, void *ptr, size_t new_size);
+ void (*free) (void *ctx, void *ptr, size_t size);
+ /*
+ * This is the end of the version=1 struct. Only add new fields after
+ * this line
+ */
+} PyDataMemAllocator;
+
+typedef struct {
+    char name[127];  /* 127 chars + 1 version byte = 128, a multiple of 64, keeping the struct aligned */
+ uint8_t version; /* currently 1 */
+ PyDataMemAllocator allocator;
+} PyDataMem_Handler;
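+
+/*
+ * Sketch: a handler that simply forwards to the raw CPython allocator. A
+ * real handler would be wrapped in a capsule and installed with
+ * PyDataMem_SetHandler.
+ */
+static void *
+example_malloc(void *ctx, size_t size)
+{
+    (void)ctx;
+    return PyMem_RawMalloc(size);
+}
+
+static void *
+example_calloc(void *ctx, size_t nelem, size_t elsize)
+{
+    (void)ctx;
+    return PyMem_RawCalloc(nelem, elsize);
+}
+
+static void *
+example_realloc(void *ctx, void *ptr, size_t new_size)
+{
+    (void)ctx;
+    return PyMem_RawRealloc(ptr, new_size);
+}
+
+static void
+example_free(void *ctx, void *ptr, size_t size)
+{
+    (void)ctx; (void)size;
+    PyMem_RawFree(ptr);
+}
+
+static PyDataMem_Handler example_handler = {
+    "example_handler",  /* name */
+    1,                  /* version */
+    {NULL, example_malloc, example_calloc, example_realloc, example_free},
+};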
+
+
+/*
+ * The main array object structure.
+ *
+ * For a number of releases it has been recommended to use the inline
+ * functions defined below (PyArray_DATA and friends) to access fields
+ * here. Direct access to the members themselves is deprecated.
+ * To ensure that your code does not use deprecated access,
+ * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ * (or NPY_1_8_API_VERSION or higher as required).
+ */
+/* This struct will be moved to a private header in a future release */
+typedef struct tagPyArrayObject_fields {
+ PyObject_HEAD
+ /* Pointer to the raw data buffer */
+ char *data;
+ /* The number of dimensions, also called 'ndim' */
+ int nd;
+ /* The size in each dimension, also called 'shape' */
+ npy_intp *dimensions;
+ /*
+ * Number of bytes to jump to get to the
+ * next element in each dimension
+ */
+ npy_intp *strides;
+ /*
+ * This object is decref'd upon
+ * deletion of array. Except in the
+ * case of WRITEBACKIFCOPY which has
+ * special handling.
+ *
+ * For views it points to the original
+ * array, collapsed so no chains of
+ * views occur.
+ *
+ * For creation from buffer object it
+ * points to an object that should be
+ * decref'd on deletion
+ *
+ * For WRITEBACKIFCOPY flag this is an
+ * array to-be-updated upon calling
+ * PyArray_ResolveWritebackIfCopy
+ */
+ PyObject *base;
+ /* Pointer to type structure */
+ PyArray_Descr *descr;
+ /* Flags describing array -- see below */
+ int flags;
+ /* For weak references */
+ PyObject *weakreflist;
+#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
+ void *_buffer_info; /* private buffer info, tagged to allow warning */
+#endif
+ /*
+ * For malloc/calloc/realloc/free per object
+ */
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+ PyObject *mem_handler;
+#endif
+} PyArrayObject_fields;
+
+/*
+ * To hide the implementation details, we only expose
+ * the Python struct HEAD.
+ */
+#if !defined(NPY_NO_DEPRECATED_API) || \
+ (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+/*
+ * Can't put this in npy_deprecated_api.h like the others.
+ * PyArrayObject field access is deprecated as of NumPy 1.7.
+ */
+typedef PyArrayObject_fields PyArrayObject;
+#else
+typedef struct tagPyArrayObject {
+ PyObject_HEAD
+} PyArrayObject;
+#endif
+
+/*
+ * Removed 2020-Nov-25, NumPy 1.20
+ * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields))
+ *
+ * The above macro was removed as it gave a false sense of a stable ABI
+ * with respect to the structures size. If you require a runtime constant,
+ * you can use `PyArray_Type.tp_basicsize` instead. Otherwise, please
+ * see the PyArrayObject documentation or ask the NumPy developers for
+ * information on how to correctly replace the macro in a way that is
+ * compatible with multiple NumPy versions.
+ */
+
+/* Mirrors buffer object to ptr */
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *base;
+ void *ptr;
+ npy_intp len;
+ int flags;
+} PyArray_Chunk;
+
+typedef struct {
+ NPY_DATETIMEUNIT base;
+ int num;
+} PyArray_DatetimeMetaData;
+
+typedef struct {
+ NpyAuxData base;
+ PyArray_DatetimeMetaData meta;
+} PyArray_DatetimeDTypeMetaData;
+
+/*
+ * This structure contains an exploded view of a date-time value.
+ * NaT is represented by year == NPY_DATETIME_NAT.
+ */
+typedef struct {
+ npy_int64 year;
+ npy_int32 month, day, hour, min, sec, us, ps, as;
+} npy_datetimestruct;
+
+/* This structure contains an exploded view of a timedelta value */
+typedef struct {
+ npy_int64 day;
+ npy_int32 sec, us, ps, as;
+} npy_timedeltastruct;
+
+typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
+
+/*
+ * Means C-style contiguous (last index varies the fastest). The data
+ * elements are stored right after each other.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_C_CONTIGUOUS 0x0001
+
+/*
+ * Set if array is a contiguous Fortran array: the first index varies
+ * the fastest in memory (strides array is reverse of C-contiguous
+ * array)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_F_CONTIGUOUS 0x0002
+
+/*
+ * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a
+ * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with
+ * more than one dimension can be C_CONTIGUOUS and F_CONTIGUOUS
+ * at the same time if they have either zero or one element.
+ * A higher dimensional array always has the same contiguity flags as
+ * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are
+ * effectively ignored when checking for contiguity.
+ */
+
+/*
+ * If set, the array owns the data: it will be free'd when the array
+ * is deleted.
+ *
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_OWNDATA 0x0004
+
+/*
+ * An array never has the next four set; they're only used as parameter
+ * flags to the various FromAny functions
+ *
+ * This flag may be requested in constructor functions.
+ */
+
+/* Cause a cast to occur regardless of whether or not it is safe. */
+#define NPY_ARRAY_FORCECAST 0x0010
+
+/*
+ * Always copy the array. Returned arrays are always CONTIGUOUS,
+ * ALIGNED, and WRITEABLE. See also: NPY_ARRAY_ENSURENOCOPY = 0x4000.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSURECOPY 0x0020
+
+/*
+ * Make sure the returned array is a base-class ndarray
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSUREARRAY 0x0040
+
+/*
+ * Make sure that the strides are in units of the element size. Needed
+ * for some operations with record-arrays.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ELEMENTSTRIDES 0x0080
+
+/*
+ * Array data is aligned on the appropriate memory address for the type
+ * stored according to how the compiler would align things (e.g., an
+ * array of integers (4 bytes each) starts on a memory address that's
+ * a multiple of 4)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_ALIGNED 0x0100
+
+/*
+ * Array data has the native endianness
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_NOTSWAPPED 0x0200
+
+/*
+ * Array data is writeable
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_WRITEABLE 0x0400
+
+/*
+ * If this flag is set, then base contains a pointer to an array of
+ * the same size that should be updated with the current contents of
+ * this array when PyArray_ResolveWritebackIfCopy is called.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000
+
+/*
+ * No copy may be made while converting from an object/array (result is a view)
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSURENOCOPY 0x4000
+
+/*
+ * NOTE: there are also internal flags defined in multiarray/arrayobject.h,
+ * which start at bit 31 and work down.
+ */
+
+#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \
+ NPY_ARRAY_WRITEABLE)
+#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \
+ NPY_ARRAY_WRITEABLE | \
+ NPY_ARRAY_NOTSWAPPED)
+#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO)
+#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \
+ NPY_ARRAY_WRITEBACKIFCOPY)
+#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO)
+#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY)
+#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY)
+#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \
+ NPY_ARRAY_WRITEBACKIFCOPY)
+
+#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+
+/* This flag is for the array interface, not PyArrayObject */
+#define NPY_ARR_HAS_DESCR 0x0800
+
+
+
+
+/*
+ * Size of internal buffers used for alignment. Make BUFSIZE a multiple
+ * of sizeof(npy_cdouble) -- usually 16 -- so that ufunc buffers are aligned.
+ */
+#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble))
+#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000)
+#define NPY_BUFSIZE 8192
+/* buffer stress test size: */
+/*#define NPY_BUFSIZE 17*/
+
+/*
+ * C API: consists of Macros and functions. The MACROS are defined
+ * here.
+ */
+
+
+#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE)
+#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED)
+
+#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS)
+
+/* the variable is used in some places, so always define it */
+#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL;
+#if NPY_ALLOW_THREADS
+#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS
+#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS
+#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0);
+#define NPY_END_THREADS do { if (_save) \
+ { PyEval_RestoreThread(_save); _save = NULL;} } while (0);
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \
+ { _save = PyEval_SaveThread();} } while (0);
+
+
+#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__;
+#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0);
+#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0);
+#else
+#define NPY_BEGIN_ALLOW_THREADS
+#define NPY_END_ALLOW_THREADS
+#define NPY_BEGIN_THREADS
+#define NPY_END_THREADS
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size)
+#define NPY_BEGIN_THREADS_DESCR(dtype)
+#define NPY_END_THREADS_DESCR(dtype)
+#define NPY_ALLOW_C_API_DEF
+#define NPY_ALLOW_C_API
+#define NPY_DISABLE_C_API
+#endif
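+
+/*
+ * Usage sketch: release the GIL around a pure-C section. No CPython API
+ * may be called between NPY_BEGIN_THREADS and NPY_END_THREADS.
+ */
+static inline void
+example_fill_without_gil(double *buf, npy_intp n, double value)
+{
+    NPY_BEGIN_THREADS_DEF;
+    NPY_BEGIN_THREADS;
+    for (npy_intp i = 0; i < n; i++) {
+        buf[i] = value;
+    }
+    NPY_END_THREADS;
+}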
+
+/**********************************
+ * The nditer object, added in 1.6
+ **********************************/
+
+/* The actual structure of the iterator is an internal detail */
+typedef struct NpyIter_InternalOnly NpyIter;
+
+/* Iterator function pointers that may be specialized */
+typedef int (NpyIter_IterNextFunc)(NpyIter *iter);
+typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter,
+ npy_intp *outcoords);
+
+/*** Global flags that may be passed to the iterator constructors ***/
+
+/* Track an index representing C order */
+#define NPY_ITER_C_INDEX 0x00000001
+/* Track an index representing Fortran order */
+#define NPY_ITER_F_INDEX 0x00000002
+/* Track a multi-index */
+#define NPY_ITER_MULTI_INDEX 0x00000004
+/* User code external to the iterator does the 1-dimensional innermost loop */
+#define NPY_ITER_EXTERNAL_LOOP 0x00000008
+/* Convert all the operands to a common data type */
+#define NPY_ITER_COMMON_DTYPE 0x00000010
+/* Operands may hold references, requiring API access during iteration */
+#define NPY_ITER_REFS_OK 0x00000020
+/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */
+#define NPY_ITER_ZEROSIZE_OK 0x00000040
+/* Permits reductions (size-0 stride with dimension size > 1) */
+#define NPY_ITER_REDUCE_OK 0x00000080
+/* Enables sub-range iteration */
+#define NPY_ITER_RANGED 0x00000100
+/* Enables buffering */
+#define NPY_ITER_BUFFERED 0x00000200
+/* When buffering is enabled, grows the inner loop if possible */
+#define NPY_ITER_GROWINNER 0x00000400
+/* Delay allocation of buffers until first Reset* call */
+#define NPY_ITER_DELAY_BUFALLOC 0x00000800
+/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */
+#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000
+/*
+ * If output operands overlap with other operands (based on heuristics that
+ * have false positives but no false negatives), make temporary copies to
+ * eliminate overlap.
+ */
+#define NPY_ITER_COPY_IF_OVERLAP 0x00002000
+
+/*** Per-operand flags that may be passed to the iterator constructors ***/
+
+/* The operand will be read from and written to */
+#define NPY_ITER_READWRITE 0x00010000
+/* The operand will only be read from */
+#define NPY_ITER_READONLY 0x00020000
+/* The operand will only be written to */
+#define NPY_ITER_WRITEONLY 0x00040000
+/* The operand's data must be in native byte order */
+#define NPY_ITER_NBO 0x00080000
+/* The operand's data must be aligned */
+#define NPY_ITER_ALIGNED 0x00100000
+/* The operand's data must be contiguous (within the inner loop) */
+#define NPY_ITER_CONTIG 0x00200000
+/* The operand may be copied to satisfy requirements */
+#define NPY_ITER_COPY 0x00400000
+/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */
+#define NPY_ITER_UPDATEIFCOPY 0x00800000
+/* Allocate the operand if it is NULL */
+#define NPY_ITER_ALLOCATE 0x01000000
+/* If an operand is allocated, don't use any subtype */
+#define NPY_ITER_NO_SUBTYPE 0x02000000
+/* This is a virtual array slot, operand is NULL but temporary data is there */
+#define NPY_ITER_VIRTUAL 0x04000000
+/* Require that the dimension match the iterator dimensions exactly */
+#define NPY_ITER_NO_BROADCAST 0x08000000
+/* A mask is being used on this array, affects buffer -> array copy */
+#define NPY_ITER_WRITEMASKED 0x10000000
+/* This array is the mask for all WRITEMASKED operands */
+#define NPY_ITER_ARRAYMASK 0x20000000
+/* Assume iterator order data access for COPY_IF_OVERLAP */
+#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000
+
+#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff
+#define NPY_ITER_PER_OP_FLAGS 0xffff0000
+
+
+/*****************************
+ * Basic iterator object
+ *****************************/
+
+/* FWD declaration */
+typedef struct PyArrayIterObject_tag PyArrayIterObject;
+
+/*
+ * type of the function which translates a set of coordinates to a
+ * pointer to the data
+ */
+typedef char* (*npy_iter_get_dataptr_t)(
+ PyArrayIterObject* iter, const npy_intp*);
+
+struct PyArrayIterObject_tag {
+ PyObject_HEAD
+ int nd_m1; /* number of dimensions - 1 */
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */
+ npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */
+ npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */
+ npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */
+ npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */
+ PyArrayObject *ao;
+    char *dataptr;                   /* pointer to current item */
+ npy_bool contiguous;
+
+ npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2];
+ npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS];
+ npy_iter_get_dataptr_t translate;
+};
+
+
+/* Iterator API */
+#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type)
+
+#define _PyAIT(it) ((PyArrayIterObject *)(it))
+#define PyArray_ITER_RESET(it) do { \
+ _PyAIT(it)->index = 0; \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ memset(_PyAIT(it)->coordinates, 0, \
+ (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \
+} while (0)
+
+#define _PyArray_ITER_NEXT1(it) do { \
+ (it)->dataptr += _PyAIT(it)->strides[0]; \
+ (it)->coordinates[0]++; \
+} while (0)
+
+#define _PyArray_ITER_NEXT2(it) do { \
+ if ((it)->coordinates[1] < (it)->dims_m1[1]) { \
+ (it)->coordinates[1]++; \
+ (it)->dataptr += (it)->strides[1]; \
+ } \
+ else { \
+ (it)->coordinates[1] = 0; \
+ (it)->coordinates[0]++; \
+ (it)->dataptr += (it)->strides[0] - \
+ (it)->backstrides[1]; \
+ } \
+} while (0)
+
+#define PyArray_ITER_NEXT(it) do { \
+ _PyAIT(it)->index++; \
+ if (_PyAIT(it)->nd_m1 == 0) { \
+ _PyArray_ITER_NEXT1(_PyAIT(it)); \
+ } \
+ else if (_PyAIT(it)->contiguous) \
+ _PyAIT(it)->dataptr += PyArray_ITEMSIZE(_PyAIT(it)->ao); \
+ else if (_PyAIT(it)->nd_m1 == 1) { \
+ _PyArray_ITER_NEXT2(_PyAIT(it)); \
+ } \
+ else { \
+ int __npy_i; \
+ for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \
+ if (_PyAIT(it)->coordinates[__npy_i] < \
+ _PyAIT(it)->dims_m1[__npy_i]) { \
+ _PyAIT(it)->coordinates[__npy_i]++; \
+ _PyAIT(it)->dataptr += \
+ _PyAIT(it)->strides[__npy_i]; \
+ break; \
+ } \
+ else { \
+ _PyAIT(it)->coordinates[__npy_i] = 0; \
+ _PyAIT(it)->dataptr -= \
+ _PyAIT(it)->backstrides[__npy_i]; \
+ } \
+ } \
+ } \
+} while (0)
+
+#define PyArray_ITER_GOTO(it, destination) do { \
+ int __npy_i; \
+ _PyAIT(it)->index = 0; \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \
+ if (destination[__npy_i] < 0) { \
+ destination[__npy_i] += \
+ _PyAIT(it)->dims_m1[__npy_i]+1; \
+ } \
+ _PyAIT(it)->dataptr += destination[__npy_i] * \
+ _PyAIT(it)->strides[__npy_i]; \
+ _PyAIT(it)->coordinates[__npy_i] = \
+ destination[__npy_i]; \
+ _PyAIT(it)->index += destination[__npy_i] * \
+ ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \
+ _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \
+ } \
+} while (0)
+
+#define PyArray_ITER_GOTO1D(it, ind) do { \
+ int __npy_i; \
+ npy_intp __npy_ind = (npy_intp)(ind); \
+ if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \
+ _PyAIT(it)->index = __npy_ind; \
+ if (_PyAIT(it)->nd_m1 == 0) { \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+ __npy_ind * _PyAIT(it)->strides[0]; \
+ } \
+ else if (_PyAIT(it)->contiguous) \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+ __npy_ind * PyArray_ITEMSIZE(_PyAIT(it)->ao); \
+ else { \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \
+ __npy_i++) { \
+ _PyAIT(it)->coordinates[__npy_i] = \
+ (__npy_ind / _PyAIT(it)->factors[__npy_i]); \
+ _PyAIT(it)->dataptr += \
+ (__npy_ind / _PyAIT(it)->factors[__npy_i]) \
+ * _PyAIT(it)->strides[__npy_i]; \
+ __npy_ind %= _PyAIT(it)->factors[__npy_i]; \
+ } \
+ } \
+} while (0)
+
+#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr))
+
+#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size)
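+
+/*
+ * A minimal sketch of the classic flat-iteration idiom built from the
+ * macros above; `arr` is an assumed existing float64 PyArrayObject *.
+ *
+ *     PyObject *it = PyArray_IterNew((PyObject *)arr);
+ *     while (PyArray_ITER_NOTDONE(it)) {
+ *         double *val = (double *)PyArray_ITER_DATA(it);
+ *         // ... consume *val ...
+ *         PyArray_ITER_NEXT(it);
+ *     }
+ *     Py_DECREF(it);
+ */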
+
+
+/*
+ * Any object passed to PyArray_Broadcast must be binary compatible
+ * with this structure.
+ */
+
+typedef struct {
+ PyObject_HEAD
+ int numiter; /* number of iters */
+ npy_intp size; /* broadcasted size */
+ npy_intp index; /* current index */
+ int nd; /* number of dims */
+ npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; /* dimensions */
+ /*
+ * Space for the individual iterators, do not specify size publicly
+ * to allow changing it more easily.
+ * One reason is that Cython uses this for checks and only allows
+ * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS
+ * to be runtime dependent.
+ */
+#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+ PyArrayIterObject *iters[64];
+#elif defined(__cplusplus)
+ /*
+ * C++ doesn't strictly support flexible members and gives compiler
+ * warnings (pedantic only), so we lie. We can't make it 64 because
+ * then Cython is unhappy (a larger struct at runtime is OK, a smaller
+ * one is not).
+ */
+ PyArrayIterObject *iters[32];
+#else
+ PyArrayIterObject *iters[];
+#endif
+} PyArrayMultiIterObject;
+
+#define _PyMIT(m) ((PyArrayMultiIterObject *)(m))
+#define PyArray_MultiIter_RESET(multi) do { \
+ int __npy_mi; \
+ _PyMIT(multi)->index = 0; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \
+ } \
+} while (0)
+
+#define PyArray_MultiIter_NEXT(multi) do { \
+ int __npy_mi; \
+ _PyMIT(multi)->index++; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \
+ } \
+} while (0)
+
+#define PyArray_MultiIter_GOTO(multi, dest) do { \
+ int __npy_mi; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \
+ } \
+ _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \
+} while (0)
+
+#define PyArray_MultiIter_GOTO1D(multi, ind) do { \
+ int __npy_mi; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \
+ } \
+ _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \
+} while (0)
+
+#define PyArray_MultiIter_DATA(multi, i) \
+ ((void *)(_PyMIT(multi)->iters[i]->dataptr))
+
+#define PyArray_MultiIter_NEXTi(multi, i) \
+ PyArray_ITER_NEXT(_PyMIT(multi)->iters[i])
+
+#define PyArray_MultiIter_NOTDONE(multi) \
+ (_PyMIT(multi)->index < _PyMIT(multi)->size)
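+
+/*
+ * Hedged sketch of the multi-iterator (broadcasting) idiom; `a` and `b`
+ * are assumed existing PyArrayObject pointers, error handling elided.
+ *
+ *     PyObject *multi = PyArray_MultiIterNew(2, (PyObject *)a,
+ *                                               (PyObject *)b);
+ *     while (PyArray_MultiIter_NOTDONE(multi)) {
+ *         void *pa = PyArray_MultiIter_DATA(multi, 0);
+ *         void *pb = PyArray_MultiIter_DATA(multi, 1);
+ *         // ... combine the broadcast element pair ...
+ *         PyArray_MultiIter_NEXT(multi);
+ *     }
+ *     Py_DECREF(multi);
+ */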
+
+
+static NPY_INLINE int
+PyArray_MultiIter_NUMITER(PyArrayMultiIterObject *multi)
+{
+ return multi->numiter;
+}
+
+
+static NPY_INLINE npy_intp
+PyArray_MultiIter_SIZE(PyArrayMultiIterObject *multi)
+{
+ return multi->size;
+}
+
+
+static NPY_INLINE npy_intp
+PyArray_MultiIter_INDEX(PyArrayMultiIterObject *multi)
+{
+ return multi->index;
+}
+
+
+static NPY_INLINE int
+PyArray_MultiIter_NDIM(PyArrayMultiIterObject *multi)
+{
+ return multi->nd;
+}
+
+
+static NPY_INLINE npy_intp *
+PyArray_MultiIter_DIMS(PyArrayMultiIterObject *multi)
+{
+ return multi->dimensions;
+}
+
+
+static NPY_INLINE void **
+PyArray_MultiIter_ITERS(PyArrayMultiIterObject *multi)
+{
+ return (void**)multi->iters;
+}
+
+
+enum {
+ NPY_NEIGHBORHOOD_ITER_ZERO_PADDING,
+ NPY_NEIGHBORHOOD_ITER_ONE_PADDING,
+ NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING,
+ NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING,
+ NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING
+};
+
+typedef struct {
+ PyObject_HEAD
+
+ /*
+ * PyArrayIterObject part: keep this in this exact order
+ */
+ int nd_m1; /* number of dimensions - 1 */
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */
+ npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */
+ npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */
+ npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */
+ npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */
+ PyArrayObject *ao;
+ char *dataptr; /* pointer to current item*/
+ npy_bool contiguous;
+
+ npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2];
+ npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS];
+ npy_iter_get_dataptr_t translate;
+
+ /*
+ * New members
+ */
+ npy_intp nd;
+
+ /* dimensions holds the full shape of the array */
+ npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS];
+
+ /*
+ * Neighborhood point coordinates are computed relative to the
+ * point pointed to by _internal_iter
+ */
+ PyArrayIterObject* _internal_iter;
+ /*
+ * To keep a reference to the representation of the constant value
+ * for constant padding
+ */
+ char* constant;
+
+ int mode;
+} PyArrayNeighborhoodIterObject;
+
+/*
+ * Neighborhood iterator API
+ */
+
+/* General: those work for any mode */
+static inline int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter);
+static inline int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter);
+#if 0
+static inline int
+PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
+#endif
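+
+/*
+ * Hedged usage sketch: a zero-padded three-point neighborhood walk around
+ * the current position of an existing iterator `it` (assumed to be a
+ * PyArrayIterObject * over a 1-D array; error handling elided).  `bounds`
+ * gives inclusive offsets around the current point.
+ *
+ *     npy_intp bounds[2] = {-1, 1};
+ *     PyArrayNeighborhoodIterObject *nit = (PyArrayNeighborhoodIterObject *)
+ *         PyArray_NeighborhoodIterNew(it, bounds,
+ *                                     NPY_NEIGHBORHOOD_ITER_ZERO_PADDING,
+ *                                     NULL);
+ *     PyArrayNeighborhoodIter_Reset(nit);
+ *     for (npy_intp i = 0; i < nit->size; i++) {
+ *         // nit->dataptr points at the current neighborhood element
+ *         PyArrayNeighborhoodIter_Next(nit);
+ *     }
+ */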
+
+/*
+ * Include inline implementations - functions defined there are not
+ * considered public API
+ */
+#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+#include "_neighborhood_iterator_imp.h"
+#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+
+
+
+/* The default array type */
+#define NPY_DEFAULT_TYPE NPY_DOUBLE
+/* default integer type defined in npy_2_compat header */
+
+/*
+ * All sorts of useful ways to look into a PyArrayObject. It is recommended
+ * to use PyArrayObject * objects instead of always casting from PyObject *,
+ * for improved type checking.
+ *
+ * In many cases here the macro versions of the accessors are deprecated,
+ * but can't be immediately changed to inline functions because the
+ * preexisting macros accept PyObject * and do automatic casts. Inline
+ * functions accepting PyArrayObject * provide some compile-time
+ * checking of correctness when working with these objects in C.
+ */
+
+#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \
+ PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS))
+
+#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \
+ (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)))
+
+#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \
+ NPY_ARRAY_F_CONTIGUOUS : 0))
+
+static inline int
+PyArray_NDIM(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->nd;
+}
+
+static inline void *
+PyArray_DATA(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->data;
+}
+
+static inline char *
+PyArray_BYTES(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->data;
+}
+
+static inline npy_intp *
+PyArray_DIMS(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions;
+}
+
+static inline npy_intp *
+PyArray_STRIDES(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->strides;
+}
+
+static inline npy_intp
+PyArray_DIM(const PyArrayObject *arr, int idim)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions[idim];
+}
+
+static inline npy_intp
+PyArray_STRIDE(const PyArrayObject *arr, int istride)
+{
+ return ((PyArrayObject_fields *)arr)->strides[istride];
+}
+
+static inline NPY_RETURNS_BORROWED_REF PyObject *
+PyArray_BASE(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->base;
+}
+
+static inline NPY_RETURNS_BORROWED_REF PyArray_Descr *
+PyArray_DESCR(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr;
+}
+
+static inline int
+PyArray_FLAGS(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->flags;
+}
+
+
+static inline int
+PyArray_TYPE(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr->type_num;
+}
+
+static inline int
+PyArray_CHKFLAGS(const PyArrayObject *arr, int flags)
+{
+ return (PyArray_FLAGS(arr) & flags) == flags;
+}
+
+static inline PyArray_Descr *
+PyArray_DTYPE(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr;
+}
+
+static inline npy_intp *
+PyArray_SHAPE(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions;
+}
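+
+/*
+ * A small sketch of the accessor style recommended above (PyArrayObject *
+ * in, no direct struct access); `total_elems` is a hypothetical helper.
+ *
+ *     static npy_intp
+ *     total_elems(PyArrayObject *arr)
+ *     {
+ *         npy_intp n = 1;
+ *         for (int i = 0; i < PyArray_NDIM(arr); i++) {
+ *             n *= PyArray_DIM(arr, i);
+ *         }
+ *         return n;
+ *     }
+ */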
+
+/*
+ * Enables the specified array flags. Does no checking,
+ * assumes you know what you're doing.
+ */
+static inline void
+PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags)
+{
+ ((PyArrayObject_fields *)arr)->flags |= flags;
+}
+
+/*
+ * Clears the specified array flags. Does no checking,
+ * assumes you know what you're doing.
+ */
+static inline void
+PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
+{
+ ((PyArrayObject_fields *)arr)->flags &= ~flags;
+}
+
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+ static inline NPY_RETURNS_BORROWED_REF PyObject *
+ PyArray_HANDLER(PyArrayObject *arr)
+ {
+ return ((PyArrayObject_fields *)arr)->mem_handler;
+ }
+#endif
+
+#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL)
+
+#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \
+ ((type) == NPY_USHORT) || \
+ ((type) == NPY_UINT) || \
+ ((type) == NPY_ULONG) || \
+ ((type) == NPY_ULONGLONG))
+
+#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \
+ ((type) == NPY_SHORT) || \
+ ((type) == NPY_INT) || \
+ ((type) == NPY_LONG) || \
+ ((type) == NPY_LONGLONG))
+
+#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \
+ ((type) <= NPY_ULONGLONG))
+
+#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \
+ ((type) <= NPY_LONGDOUBLE)) || \
+ ((type) == NPY_HALF))
+
+#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \
+ ((type) == NPY_HALF))
+
+#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \
+ ((type) == NPY_UNICODE))
+
+#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \
+ ((type) <= NPY_CLONGDOUBLE))
+
+#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \
+ ((type) <=NPY_VOID))
+
+#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \
+ ((type) <=NPY_TIMEDELTA))
+
+#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \
+ ((type) < NPY_USERDEF+ \
+ NPY_NUMUSERTYPES))
+
+#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \
+ PyTypeNum_ISUSERDEF(type))
+
+#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT)
+
+
+#define PyDataType_ISLEGACY(dtype) ((dtype)->type_num < NPY_VSTRING && ((dtype)->type_num >= 0))
+#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0)
+/*
+ * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED,
+ * SUBARRAY, NAMES, FIELDS, C_METADATA, and METADATA require version specific
+ * lookup and are defined in npy_2_compat.h.
+ */
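+
+/*
+ * Example (a hedged sketch): dispatching on a descriptor's kind with the
+ * macros above, for some existing `PyArray_Descr *descr`.
+ *
+ *     if (PyDataType_ISCOMPLEX(descr)) {
+ *         // complex path
+ *     }
+ *     else if (PyDataType_ISINTEGER(descr) && PyDataType_ISSIGNED(descr)) {
+ *         // signed-integer path
+ *     }
+ */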
+
+
+#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))
+#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj))
+#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj))
+#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj))
+#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj))
+#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj))
+#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj))
+#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj))
+#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj))
+#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj))
+#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj))
+
+ /*
+ * FIXME: This should check for a flag on the data-type that
+ * states whether or not it is variable length, because the
+ * ISFLEXIBLE check is hard-coded to the built-in data-types.
+ */
+#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+
+#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj))
+
+
+#define NPY_LITTLE '<'
+#define NPY_BIG '>'
+#define NPY_NATIVE '='
+#define NPY_SWAP 's'
+#define NPY_IGNORE '|'
+
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+#define NPY_NATBYTE NPY_BIG
+#define NPY_OPPBYTE NPY_LITTLE
+#else
+#define NPY_NATBYTE NPY_LITTLE
+#define NPY_OPPBYTE NPY_BIG
+#endif
+
+#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE)
+#define PyArray_IsNativeByteOrder PyArray_ISNBO
+#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder)
+#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \
+ PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY)
+#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO)
+#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY)
+#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO)
+#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED)
+#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED)
+
+
+#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder)
+#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d))
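+
+/*
+ * Sketch: the usual guard built from the byte-order macros above, for an
+ * assumed existing `arr`.
+ *
+ *     if (PyArray_ISBYTESWAPPED(arr)) {
+ *         // data is not in native byte order; byteswap or cast first
+ *     }
+ */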
+
+/************************************************************
+ * A struct used by PyArray_CreateSortedStridePerm, new in 1.7.
+ ************************************************************/
+
+typedef struct {
+ npy_intp perm, stride;
+} npy_stride_sort_item;
+
+/************************************************************
+ * This is the form of the struct that's stored in the
+ * PyCapsule returned by an array's __array_struct__ attribute. See
+ * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
+ * documentation.
+ ************************************************************/
+typedef struct {
+ int two; /*
+ * contains the integer 2 as a sanity
+ * check
+ */
+
+ int nd; /* number of dimensions */
+
+ char typekind; /*
+ * kind in array --- character code of
+ * typestr
+ */
+
+ int itemsize; /* size of each element */
+
+ int flags; /*
+ * how the data should be interpreted. Valid
+ * flags are CONTIGUOUS (1), F_CONTIGUOUS (2),
+ * ALIGNED (0x100), NOTSWAPPED (0x200), and
+ * WRITEABLE (0x400). ARR_HAS_DESCR (0x800)
+ * states that the arrdescr field is present in
+ * the structure
+ */
+
+ npy_intp *shape; /*
+ * A length-nd array of shape
+ * information
+ */
+
+ npy_intp *strides; /* A length-nd array of stride information */
+
+ void *data; /* A pointer to the first element of the array */
+
+ PyObject *descr; /*
+ * A list of fields or NULL (ignored if flags
+ * does not have ARR_HAS_DESCR flag set)
+ */
+} PyArrayInterface;
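+
+/*
+ * Hedged consumer sketch for the `__array_struct__` capsule described
+ * above; `obj` is an assumed exporter object, the capsule name is assumed
+ * to be NULL, and error handling is minimal.
+ *
+ *     PyObject *cap = PyObject_GetAttrString(obj, "__array_struct__");
+ *     if (cap != NULL && PyCapsule_CheckExact(cap)) {
+ *         PyArrayInterface *inter =
+ *             (PyArrayInterface *)PyCapsule_GetPointer(cap, NULL);
+ *         if (inter != NULL && inter->two == 2) {
+ *             // inter->nd, inter->shape, inter->strides, inter->data ...
+ *         }
+ *     }
+ *     Py_XDECREF(cap);
+ */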
+
+
+/****************************************
+ * NpyString
+ *
+ * Types used by the NpyString API.
+ ****************************************/
+
+/*
+ * A "packed" encoded string. The string data must be accessed by first unpacking the string.
+ */
+typedef struct npy_packed_static_string npy_packed_static_string;
+
+/*
+ * An unpacked read-only view onto the data in a packed string
+ */
+typedef struct npy_unpacked_static_string {
+ size_t size;
+ const char *buf;
+} npy_static_string;
+
+/*
+ * Handles heap allocations for static strings.
+ */
+typedef struct npy_string_allocator npy_string_allocator;
+
+typedef struct {
+ PyArray_Descr base;
+ // The object representing a null value
+ PyObject *na_object;
+ // Flag indicating whether or not to coerce arbitrary objects to strings
+ char coerce;
+ // Flag indicating the na object is NaN-like
+ char has_nan_na;
+ // Flag indicating the na object is a string
+ char has_string_na;
+ // If nonzero, indicates that this instance is owned by an array already
+ char array_owned;
+ // The string data to use when a default string is needed
+ npy_static_string default_string;
+ // The name of the missing data object, if any
+ npy_static_string na_name;
+ // The allocator should only be accessed directly after acquiring
+ // the allocator_lock, and the lock should be released immediately
+ // once the allocator is no longer needed
+ npy_string_allocator *allocator;
+} PyArray_StringDTypeObject;
+
+/*
+ * PyArray_DTypeMeta related definitions.
+ *
+ * As of now, this API is preliminary and will be extended as necessary.
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+ /*
+ * The Structures defined in this block are currently considered
+ * private API and may change without warning!
+ * Part of this (at least the size) is expected to be public API without
+ * further modifications.
+ */
+ /* TODO: Make this definition public in the API, as soon as its settled */
+ NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type;
+
+ /*
+ * While NumPy DTypes would not need to be heap types the plan is to
+ * make DTypes available in Python at which point they will be heap types.
+ * Since we also wish to add fields to the DType class, this looks like
+ * a typical instance definition, but with PyHeapTypeObject instead of
+ * only the PyObject_HEAD.
+ * This must only be exposed after very careful consideration, since
+ * it is a fairly complex construct that we may still want the
+ * freedom to refactor.
+ */
+ typedef struct {
+ PyHeapTypeObject super;
+
+ /*
+ * Most DTypes will have a singleton default instance, for the
+ * parametric legacy DTypes (bytes, string, void, datetime) this
+ * may be a pointer to the *prototype* instance?
+ */
+ PyArray_Descr *singleton;
+ /* Copy of the legacy DTypes type number, usually invalid. */
+ int type_num;
+
+ /* The type object of the scalar instances (may be NULL?) */
+ PyTypeObject *scalar_type;
+ /*
+ * DType flags to signal legacy, parametric, or
+ * abstract. But plenty of space for additional information/flags.
+ */
+ npy_uint64 flags;
+
+ /*
+ * Use indirection in order to allow a fixed size for this struct.
+ * A stable ABI size makes creating a static DType less painful
+ * while also ensuring flexibility for all opaque API (with one
+ * indirection due to the pointer lookup).
+ */
+ void *dt_slots;
+ void *reserved[3];
+ } PyArray_DTypeMeta;
+
+#endif /* NPY_INTERNAL_BUILD */
+
+
+/*
+ * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files
+ * npy_*_*_deprecated_api.h are only included from here and nowhere else.
+ */
+#ifdef NPY_DEPRECATED_INCLUDES
+#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES."
+#endif
+#define NPY_DEPRECATED_INCLUDES
+/*
+ * There is no file npy_1_8_deprecated_api.h since there are no additional
+ * deprecated API features in NumPy 1.8.
+ *
+ * Note to maintainers: insert code like the following in future NumPy
+ * versions.
+ *
+ * #if !defined(NPY_NO_DEPRECATED_API) || \
+ * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION)
+ * #include "npy_1_9_deprecated_api.h"
+ * #endif
+ * Then in the npy_1_9_deprecated_api.h header add something like this
+ * --------------------
+ * #ifndef NPY_DEPRECATED_INCLUDES
+ * #error "Should never include npy_*_*_deprecated_api directly."
+ * #endif
+ * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_
+ * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_
+ *
+ * #ifndef NPY_NO_DEPRECATED_API
+ * #if defined(_WIN32)
+ * #define _WARN___STR2__(x) #x
+ * #define _WARN___STR1__(x) _WARN___STR2__(x)
+ * #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
+ * #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \
+ * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
+ * #else
+ * #warning "Using deprecated NumPy API, disable it with " \
+ * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
+ * #endif
+ * #endif
+ * --------------------
+ */
+#undef NPY_DEPRECATED_INCLUDES
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_compat.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_compat.h
new file mode 100644
index 00000000..e39e65ae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_compat.h
@@ -0,0 +1,249 @@
+/*
+ * This header file defines relevant features which:
+ * - Require runtime inspection depending on the NumPy version.
+ * - May be needed when compiling with an older version of NumPy to allow
+ * a smooth transition.
+ *
+ * As such, it is shipped with NumPy 2.0, but designed to be vendored in full
+ * or parts by downstream projects.
+ *
+ * It must be included after any other includes. `import_array()` must have
+ * been called in the scope, or the version dependency will misbehave, even
+ * when only the `PyUFunc_` API is used.
+ *
+ * If required, complicated definitions (with inline functions) should be written as:
+ *
+ * #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+ * Simple definition when NumPy 2.0 API is guaranteed.
+ * #else
+ * static inline definition of a 1.x compatibility shim
+ * #if NPY_ABI_VERSION < 0x02000000
+ * Make 1.x compatibility shim the public API (1.x only branch)
+ * #else
+ * Runtime dispatched version (1.x or 2.x)
+ * #endif
+ * #endif
+ *
+ * An internal build always passes NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_
+
+/*
+ * New macros for accessing real and complex part of a complex number can be
+ * found in "npy_2_complexcompat.h".
+ */
+
+
+/*
+ * This header is meant to be included by downstream directly for 1.x compat.
+ * In that case we need to ensure that users first included the full headers
+ * and not just `ndarraytypes.h`.
+ */
+
+#ifndef NPY_FEATURE_VERSION
+ #error "The NumPy 2 compat header requires `import_array()` for which " \
+ "the `ndarraytypes.h` header include is not sufficient. Please " \
+ "include it after `numpy/ndarrayobject.h` or similar.\n" \
+ "To simplify inclusion, you may use `PyArray_ImportNumPy()` " \
+ "which is defined in the compat header and is lightweight (can be)."
+#endif
+
+#if NPY_ABI_VERSION < 0x02000000
+ /*
+ * Define 2.0 feature version as it is needed below to decide whether we
+ * compile for both 1.x and 2.x (defining it guarantees 1.x only).
+ */
+ #define NPY_2_0_API_VERSION 0x00000012
+ /*
+ * If we are compiling with NumPy 1.x, `PyArray_RUNTIME_VERSION` is not
+ * defined, so we pretend `PyArray_RUNTIME_VERSION` is `NPY_FEATURE_VERSION`.
+ * This allows downstream to use `PyArray_RUNTIME_VERSION` if they need to.
+ */
+ #define PyArray_RUNTIME_VERSION NPY_FEATURE_VERSION
+ /* Compiling on NumPy 1.x where these are the same: */
+ #define PyArray_DescrProto PyArray_Descr
+#endif
+
+
+/*
+ * Define a better way to call `_import_array()` to simplify backporting as
+ * we now require imports more often (necessary to make ABI flexible).
+ */
+#ifdef import_array1
+
+static inline int
+PyArray_ImportNumPyAPI(void)
+{
+ if (NPY_UNLIKELY(PyArray_API == NULL)) {
+ import_array1(-1);
+ }
+ return 0;
+}
+
+#endif /* import_array1 */
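+
+/*
+ * Hedged sketch of the intended call site, e.g. in a module init or exec
+ * function (`my_module_exec` is a hypothetical name):
+ *
+ *     static int
+ *     my_module_exec(PyObject *mod)
+ *     {
+ *         if (PyArray_ImportNumPyAPI() < 0) {
+ *             return -1;
+ *         }
+ *         // ... rest of the module setup ...
+ *         return 0;
+ *     }
+ */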
+
+
+/*
+ * NPY_DEFAULT_INT
+ *
+ * The default integer has changed, `NPY_DEFAULT_INT` is available at runtime
+ * for use as type number, e.g. `PyArray_DescrFromType(NPY_DEFAULT_INT)`.
+ *
+ * NPY_RAVEL_AXIS
+ *
+ * This was introduced in NumPy 2.0 to allow indicating that an axis should be
+ * raveled in an operation. Before NumPy 2.0, NPY_MAXDIMS was used for this purpose.
+ *
+ * NPY_MAXDIMS
+ *
+ * A constant indicating the maximum number of dimensions allowed when creating
+ * an ndarray.
+ *
+ * NPY_NTYPES_LEGACY
+ *
+ * The number of built-in NumPy dtypes.
+ */
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+ #define NPY_DEFAULT_INT NPY_INTP
+ #define NPY_RAVEL_AXIS NPY_MIN_INT
+ #define NPY_MAXARGS 64
+
+#elif NPY_ABI_VERSION < 0x02000000
+ #define NPY_DEFAULT_INT NPY_LONG
+ #define NPY_RAVEL_AXIS 32
+ #define NPY_MAXARGS 32
+
+ /* Aliases of 2.x names to 1.x only equivalent names */
+ #define NPY_NTYPES NPY_NTYPES_LEGACY
+ #define PyArray_DescrProto PyArray_Descr
+ #define _PyArray_LegacyDescr PyArray_Descr
+ /* NumPy 2 definition always works, but add it for 1.x only */
+ #define PyDataType_ISLEGACY(dtype) (1)
+#else
+ #define NPY_DEFAULT_INT \
+ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG)
+ #define NPY_RAVEL_AXIS \
+ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32)
+ #define NPY_MAXARGS \
+ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 64 : 32)
+#endif
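+
+/*
+ * Example (sketch): the macros above can be used like ordinary constants;
+ * on a 1.x runtime the dispatched branch picks the 1.x value.
+ *
+ *     PyArray_Descr *d = PyArray_DescrFromType(NPY_DEFAULT_INT);
+ */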
+
+
+/*
+ * Accessor inline functions for descriptor fields. Except for the first
+ * few fields, these had to be moved (elsize, alignment) to make room for
+ * additional space; or they are descriptor specific and are not generally
+ * available anymore (metadata, c_metadata, subarray, names, fields).
+ *
+ * Most of these are defined via the `DESCR_ACCESSOR` macro helper.
+ */
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION || NPY_ABI_VERSION < 0x02000000
+ /* Compiling for 1.x or 2.x only, direct field access is OK: */
+
+ static inline void
+ PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size)
+ {
+ dtype->elsize = size;
+ }
+
+ static inline npy_uint64
+ PyDataType_FLAGS(const PyArray_Descr *dtype)
+ {
+ #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+ return dtype->flags;
+ #else
+ return (unsigned char)dtype->flags; /* Need unsigned cast on 1.x */
+ #endif
+ }
+
+ #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \
+ static inline type \
+ PyDataType_##FIELD(const PyArray_Descr *dtype) { \
+ if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \
+ return (type)0; \
+ } \
+ return ((_PyArray_LegacyDescr *)dtype)->field; \
+ }
+#else /* compiling for both 1.x and 2.x */
+
+ static inline void
+ PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size)
+ {
+ if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {
+ ((_PyArray_DescrNumPy2 *)dtype)->elsize = size;
+ }
+ else {
+ ((PyArray_DescrProto *)dtype)->elsize = (int)size;
+ }
+ }
+
+ static inline npy_uint64
+ PyDataType_FLAGS(const PyArray_Descr *dtype)
+ {
+ if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {
+ return ((_PyArray_DescrNumPy2 *)dtype)->flags;
+ }
+ else {
+ return (unsigned char)((PyArray_DescrProto *)dtype)->flags;
+ }
+ }
+
+ /* The cast to LegacyDescr is always fine, but only needed when `legacy_only` */
+ #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \
+ static inline type \
+ PyDataType_##FIELD(const PyArray_Descr *dtype) { \
+ if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \
+ return (type)0; \
+ } \
+ if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { \
+ return ((_PyArray_LegacyDescr *)dtype)->field; \
+ } \
+ else { \
+ return ((PyArray_DescrProto *)dtype)->field; \
+ } \
+ }
+#endif
+
+DESCR_ACCESSOR(ELSIZE, elsize, npy_intp, 0)
+DESCR_ACCESSOR(ALIGNMENT, alignment, npy_intp, 0)
+DESCR_ACCESSOR(METADATA, metadata, PyObject *, 1)
+DESCR_ACCESSOR(SUBARRAY, subarray, PyArray_ArrayDescr *, 1)
+DESCR_ACCESSOR(NAMES, names, PyObject *, 1)
+DESCR_ACCESSOR(FIELDS, fields, PyObject *, 1)
+DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1)
+
+#undef DESCR_ACCESSOR
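+
+/*
+ * Sketch: with the accessors generated above, descriptor fields are read
+ * without touching version-specific struct layout (`descr` assumed).
+ *
+ *     npy_intp itemsize = PyDataType_ELSIZE(descr);
+ *     PyObject *names = PyDataType_NAMES(descr);  // NULL unless legacy/structured
+ */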
+
+
+#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION
+ static inline PyArray_ArrFuncs *
+ PyDataType_GetArrFuncs(const PyArray_Descr *descr)
+ {
+ return _PyDataType_GetArrFuncs(descr);
+ }
+#elif NPY_ABI_VERSION < 0x02000000
+ static inline PyArray_ArrFuncs *
+ PyDataType_GetArrFuncs(const PyArray_Descr *descr)
+ {
+ return descr->f;
+ }
+#else
+ static inline PyArray_ArrFuncs *
+ PyDataType_GetArrFuncs(const PyArray_Descr *descr)
+ {
+ if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {
+ return _PyDataType_GetArrFuncs(descr);
+ }
+ else {
+ return ((PyArray_DescrProto *)descr)->f;
+ }
+ }
+#endif
+
+
+#endif /* not internal build */
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_complexcompat.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_complexcompat.h
new file mode 100644
index 00000000..0b509011
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_2_complexcompat.h
@@ -0,0 +1,28 @@
+/* This header is designed to be copy-pasted into downstream packages, since it provides
+ a compatibility layer between the old C struct complex types and the new native C99
+ complex types. The new macros are in numpy/npy_math.h, which is why it is included here. */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_
+
+#include <numpy/npy_math.h>
+
+#ifndef NPY_CSETREALF
+#define NPY_CSETREALF(c, r) (c)->real = (r)
+#endif
+#ifndef NPY_CSETIMAGF
+#define NPY_CSETIMAGF(c, i) (c)->imag = (i)
+#endif
+#ifndef NPY_CSETREAL
+#define NPY_CSETREAL(c, r) (c)->real = (r)
+#endif
+#ifndef NPY_CSETIMAG
+#define NPY_CSETIMAG(c, i) (c)->imag = (i)
+#endif
+#ifndef NPY_CSETREALL
+#define NPY_CSETREALL(c, r) (c)->real = (r)
+#endif
+#ifndef NPY_CSETIMAGL
+#define NPY_CSETIMAGL(c, i) (c)->imag = (i)
+#endif
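+
+/*
+ * Usage sketch: code written with these setters works with both the old
+ * struct-based and the C99-native complex representations.
+ *
+ *     npy_cdouble z;
+ *     NPY_CSETREAL(&z, 1.0);
+ *     NPY_CSETIMAG(&z, -2.0);
+ */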
+
+#endif
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_3kcompat.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_3kcompat.h
new file mode 100644
index 00000000..c2bf74fa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_3kcompat.h
@@ -0,0 +1,374 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting different minor versions of Python 3.
+ * It was originally used to support the transition from Python 2,
+ * hence the "3k" naming.
+ *
+ * If you want to use this for your own projects, it's recommended to make a
+ * copy of it. We don't provide backwards compatibility guarantees.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
+
+#include <Python.h>
+#include <stdio.h>
+
+#include "npy_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Python 3.13 removes _PyLong_AsInt */
+static inline int
+Npy__PyLong_AsInt(PyObject *obj)
+{
+ int overflow;
+ long result = PyLong_AsLongAndOverflow(obj, &overflow);
+
+ /* INT_MAX and INT_MIN are defined in Python.h */
+ if (overflow || result > INT_MAX || result < INT_MIN) {
+ /* XXX: could be cute and give a different
+ message for overflow == -1 */
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C int");
+ return -1;
+ }
+ return (int)result;
+}
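+
+/*
+ * Sketch: drop-in replacement use where _PyLong_AsInt was used before;
+ * -1 with an exception set signals failure.
+ *
+ *     int v = Npy__PyLong_AsInt(obj);
+ *     if (v == -1 && PyErr_Occurred()) {
+ *         return NULL;
+ *     }
+ */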
+
+#if defined _MSC_VER && _MSC_VER >= 1900
+
+#include <stdlib.h>
+
+/*
+ * Macros to protect CRT calls against instant termination when passed an
+ * invalid parameter (https://bugs.python.org/issue23524).
+ */
+extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
+#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \
+ _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);
+#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); }
+
+#else
+
+#define NPY_BEGIN_SUPPRESS_IPH
+#define NPY_END_SUPPRESS_IPH
+
+#endif /* _MSC_VER >= 1900 */
+
+/*
+ * PyFile_* compatibility
+ */
+
+/*
+ * Get a FILE* handle to the file represented by the Python object
+ */
+static inline FILE*
+npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
+{
+ int fd, fd2, unbuf;
+ Py_ssize_t fd2_tmp;
+ PyObject *ret, *os, *io, *io_raw;
+ npy_off_t pos;
+ FILE *handle;
+
+ /* Flush first to ensure things end up in the file in the correct order */
+ ret = PyObject_CallMethod(file, "flush", "");
+ if (ret == NULL) {
+ return NULL;
+ }
+ Py_DECREF(ret);
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ return NULL;
+ }
+
+ /*
+ * The handle needs to be dup'd because we have to call fclose
+ * at the end
+ */
+ os = PyImport_ImportModule("os");
+ if (os == NULL) {
+ return NULL;
+ }
+ ret = PyObject_CallMethod(os, "dup", "i", fd);
+ Py_DECREF(os);
+ if (ret == NULL) {
+ return NULL;
+ }
+ fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError);
+ Py_DECREF(ret);
+ if (fd2_tmp == -1 && PyErr_Occurred()) {
+ return NULL;
+ }
+ if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) {
+ PyErr_SetString(PyExc_IOError,
+ "Getting an 'int' from os.dup() failed");
+ return NULL;
+ }
+ fd2 = (int)fd2_tmp;
+
+ /* Convert to FILE* handle */
+#ifdef _WIN32
+ NPY_BEGIN_SUPPRESS_IPH
+ handle = _fdopen(fd2, mode);
+ NPY_END_SUPPRESS_IPH
+#else
+ handle = fdopen(fd2, mode);
+#endif
+ if (handle == NULL) {
+ PyErr_SetString(PyExc_IOError,
+ "Getting a FILE* from a Python file object via "
+ "_fdopen failed. If you built NumPy, you probably "
+ "linked with the wrong debug/release runtime");
+ return NULL;
+ }
+
+ /* Record the original raw file handle position */
+ *orig_pos = npy_ftell(handle);
+ if (*orig_pos == -1) {
+ /* The io module is needed to determine if buffering is used */
+ io = PyImport_ImportModule("io");
+ if (io == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ /* File object instances of RawIOBase are unbuffered */
+ io_raw = PyObject_GetAttrString(io, "RawIOBase");
+ Py_DECREF(io);
+ if (io_raw == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ unbuf = PyObject_IsInstance(file, io_raw);
+ Py_DECREF(io_raw);
+ if (unbuf == 1) {
+ /* Succeed if the IO is unbuffered */
+ return handle;
+ }
+ else {
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+ fclose(handle);
+ return NULL;
+ }
+ }
+
+ /* Seek raw handle to the Python-side position */
+ ret = PyObject_CallMethod(file, "tell", "");
+ if (ret == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ pos = PyLong_AsLongLong(ret);
+ Py_DECREF(ret);
+ if (PyErr_Occurred()) {
+ fclose(handle);
+ return NULL;
+ }
+ if (npy_fseek(handle, pos, SEEK_SET) == -1) {
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
+ fclose(handle);
+ return NULL;
+ }
+ return handle;
+}
+
+/*
+ * Close the dup-ed file handle, and seek the Python one to the current position
+ */
+static inline int
+npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
+{
+ int fd, unbuf;
+ PyObject *ret, *io, *io_raw;
+ npy_off_t position;
+
+ position = npy_ftell(handle);
+
+ /* Close the FILE* handle */
+ fclose(handle);
+
+ /*
+ * Restore original file handle position, in order to not confuse
+ * Python-side data structures
+ */
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ return -1;
+ }
+
+ if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) {
+
+ /* The io module is needed to determine if buffering is used */
+ io = PyImport_ImportModule("io");
+ if (io == NULL) {
+ return -1;
+ }
+ /* File object instances of RawIOBase are unbuffered */
+ io_raw = PyObject_GetAttrString(io, "RawIOBase");
+ Py_DECREF(io);
+ if (io_raw == NULL) {
+ return -1;
+ }
+ unbuf = PyObject_IsInstance(file, io_raw);
+ Py_DECREF(io_raw);
+ if (unbuf == 1) {
+ /* Succeed if the IO is unbuffered */
+ return 0;
+ }
+ else {
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
+ return -1;
+ }
+ }
+
+ if (position == -1) {
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+ return -1;
+ }
+
+ /* Seek Python-side handle to the FILE* handle position */
+ ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0);
+ if (ret == NULL) {
+ return -1;
+ }
+ Py_DECREF(ret);
+ return 0;
+}
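+
+/*
+ * Hedged sketch of the Dup2/DupClose2 pairing around C-level I/O on a
+ * Python file object `file` (assumed to exist; error paths shortened).
+ *
+ *     npy_off_t orig_pos;
+ *     FILE *fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
+ *     if (fp == NULL) {
+ *         return NULL;
+ *     }
+ *     // ... fread()/fseek() on fp ...
+ *     if (npy_PyFile_DupClose2(file, fp, orig_pos) < 0) {
+ *         return NULL;
+ *     }
+ */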
+
+static inline PyObject*
+npy_PyFile_OpenFile(PyObject *filename, const char *mode)
+{
+ PyObject *open;
+ open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
+ if (open == NULL) {
+ return NULL;
+ }
+ return PyObject_CallFunction(open, "Os", filename, mode);
+}
+
+static inline int
+npy_PyFile_CloseFile(PyObject *file)
+{
+ PyObject *ret;
+
+ ret = PyObject_CallMethod(file, "close", NULL);
+ if (ret == NULL) {
+ return -1;
+ }
+ Py_DECREF(ret);
+ return 0;
+}
+
+/* This is a copy of _PyErr_ChainExceptions, which
+ * is no longer exported from Python 3.12
+ */
+static inline void
+npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetContext(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
+
+/* This is a copy of _PyErr_ChainExceptions, with:
+ * __cause__ used instead of __context__
+ */
+static inline void
+npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetCause(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
+
+/*
+ * PyCObject functions adapted to PyCapsules.
+ *
+ * The main job here is to get rid of the improved error handling
+ * of PyCapsules. It's a shame...
+ */
+static inline PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+ PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+static inline PyObject *
+NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
+{
+ PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
+ if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
+ PyErr_Clear();
+ Py_DECREF(ret);
+ ret = NULL;
+ }
+ return ret;
+}
+
+static inline void *
+NpyCapsule_AsVoidPtr(PyObject *obj)
+{
+ void *ret = PyCapsule_GetPointer(obj, NULL);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+static inline void *
+NpyCapsule_GetDesc(PyObject *obj)
+{
+ return PyCapsule_GetContext(obj);
+}
+
+static inline int
+NpyCapsule_Check(PyObject *ptr)
+{
+ return PyCapsule_CheckExact(ptr);
+}
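+
+/*
+ * Sketch: wrapping and unwrapping a raw pointer with the shims above
+ * (`ptr` assumed; no destructor registered here).
+ *
+ *     PyObject *cap = NpyCapsule_FromVoidPtr(ptr, NULL);
+ *     void *back = NpyCapsule_AsVoidPtr(cap);
+ */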
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_common.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_common.h
new file mode 100644
index 00000000..e2556a07
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_common.h
@@ -0,0 +1,977 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
+
+/* need Python.h for npy_intp, npy_uintp */
+#include <Python.h>
+
+/* numpyconfig.h is auto-generated */
+#include "numpyconfig.h"
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+
+/*
+ * using static inline modifiers when defining npy_math functions
+ * allows the compiler to make optimizations when possible
+ */
+#ifndef NPY_INLINE_MATH
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+ #define NPY_INLINE_MATH 1
+#else
+ #define NPY_INLINE_MATH 0
+#endif
+#endif
+
+/*
+ * gcc does not unroll even with -O3
+ * use with care, unrolling on modern cpus rarely speeds things up
+ */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS
+#define NPY_GCC_UNROLL_LOOPS \
+ __attribute__((optimize("unroll-loops")))
+#else
+#define NPY_GCC_UNROLL_LOOPS
+#endif
+
+/* highest gcc optimization level, enabled autovectorizer */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3
+#define NPY_GCC_OPT_3 __attribute__((optimize("O3")))
+#else
+#define NPY_GCC_OPT_3
+#endif
+
+/*
+ * mark an argument (starting from 1) that must not be NULL and is not checked
+ * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check
+ */
+#ifdef HAVE_ATTRIBUTE_NONNULL
+#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n)))
+#else
+#define NPY_GCC_NONNULL(n)
+#endif
+
+/*
+ * give a hint to the compiler which branch is more likely or unlikely
+ * to occur, e.g. rare error cases:
+ *
+ * if (NPY_UNLIKELY(failure == 0))
+ * return NULL;
+ *
+ * the double !! is to cast the expression (e.g. NULL) to a boolean required by
+ * the intrinsic
+ */
+#ifdef HAVE___BUILTIN_EXPECT
+#define NPY_LIKELY(x) __builtin_expect(!!(x), 1)
+#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define NPY_LIKELY(x) (x)
+#define NPY_UNLIKELY(x) (x)
+#endif
+
+#ifdef HAVE___BUILTIN_PREFETCH
+/* unlike _mm_prefetch also works on non-x86 */
+#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))
+#else
+#ifdef NPY_HAVE_SSE
+/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */
+#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \
+ (loc == 1 ? _MM_HINT_T2 : \
+ (loc == 2 ? _MM_HINT_T1 : \
+ (loc == 3 ? _MM_HINT_T0 : -1))))
+#else
+#define NPY_PREFETCH(x, rw, loc)
+#endif
+#endif
+
+/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */
+#if defined(_MSC_VER) && !defined(__clang__)
+ #define NPY_INLINE __inline
+/* clang included here to handle clang-cl on Windows */
+#elif defined(__GNUC__) || defined(__clang__)
+ #if defined(__STRICT_ANSI__)
+ #define NPY_INLINE __inline__
+ #else
+ #define NPY_INLINE inline
+ #endif
+#else
+ #define NPY_INLINE
+#endif
+
+#ifdef _MSC_VER
+ #define NPY_FINLINE static __forceinline
+#elif defined(__GNUC__)
+ #define NPY_FINLINE static inline __attribute__((always_inline))
+#else
+ #define NPY_FINLINE static
+#endif
+
+#if defined(_MSC_VER)
+ #define NPY_NOINLINE static __declspec(noinline)
+#elif defined(__GNUC__) || defined(__clang__)
+ #define NPY_NOINLINE static __attribute__((noinline))
+#else
+ #define NPY_NOINLINE static
+#endif
+
+#ifdef __cplusplus
+ #define NPY_TLS thread_local
+#elif defined(HAVE_THREAD_LOCAL)
+ #define NPY_TLS thread_local
+#elif defined(HAVE__THREAD_LOCAL)
+ #define NPY_TLS _Thread_local
+#elif defined(HAVE___THREAD)
+ #define NPY_TLS __thread
+#elif defined(HAVE___DECLSPEC_THREAD_)
+ #define NPY_TLS __declspec(thread)
+#else
+ #define NPY_TLS
+#endif
+
+#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE
+ #define NPY_RETURNS_BORROWED_REF \
+ __attribute__((cpychecker_returns_borrowed_ref))
+#else
+ #define NPY_RETURNS_BORROWED_REF
+#endif
+
+#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE
+ #define NPY_STEALS_REF_TO_ARG(n) \
+ __attribute__((cpychecker_steals_reference_to_arg(n)))
+#else
+ #define NPY_STEALS_REF_TO_ARG(n)
+#endif
+
+/* 64 bit file position support, also on win-amd64. Issue gh-2256 */
+#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \
+ defined(__MINGW32__) || defined(__MINGW64__)
+ #include <io.h>
+
+ #define npy_fseek _fseeki64
+ #define npy_ftell _ftelli64
+ #define npy_lseek _lseeki64
+ #define npy_off_t npy_int64
+
+ #if NPY_SIZEOF_INT == 8
+ #define NPY_OFF_T_PYFMT "i"
+ #elif NPY_SIZEOF_LONG == 8
+ #define NPY_OFF_T_PYFMT "l"
+ #elif NPY_SIZEOF_LONGLONG == 8
+ #define NPY_OFF_T_PYFMT "L"
+ #else
+ #error Unsupported size for type off_t
+ #endif
+#else
+#ifdef HAVE_FSEEKO
+ #define npy_fseek fseeko
+#else
+ #define npy_fseek fseek
+#endif
+#ifdef HAVE_FTELLO
+ #define npy_ftell ftello
+#else
+ #define npy_ftell ftell
+#endif
+ #include <sys/types.h>
+ #ifndef _WIN32
+ #include <unistd.h>
+ #endif
+ #define npy_lseek lseek
+ #define npy_off_t off_t
+
+ #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT
+ #define NPY_OFF_T_PYFMT "h"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT
+ #define NPY_OFF_T_PYFMT "i"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG
+ #define NPY_OFF_T_PYFMT "l"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG
+ #define NPY_OFF_T_PYFMT "L"
+ #else
+ #error Unsupported size for type off_t
+ #endif
+#endif
+
+/* enums for detected endianness */
+enum {
+ NPY_CPU_UNKNOWN_ENDIAN,
+ NPY_CPU_LITTLE,
+ NPY_CPU_BIG
+};
+
+/*
+ * This is to typedef npy_intp to the appropriate size for Py_ssize_t.
+ * (Before NumPy 2.0 we used Py_intptr_t and Py_uintptr_t from `pyport.h`.)
+ */
+typedef Py_ssize_t npy_intp;
+typedef size_t npy_uintp;
+
+/*
+ * Define sizes that were not defined in numpyconfig.h.
+ */
+#define NPY_SIZEOF_CHAR 1
+#define NPY_SIZEOF_BYTE 1
+#define NPY_SIZEOF_DATETIME 8
+#define NPY_SIZEOF_TIMEDELTA 8
+#define NPY_SIZEOF_HALF 2
+#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
+#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
+#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
+
+#ifdef constchar
+#undef constchar
+#endif
+
+#define NPY_SSIZE_T_PYFMT "n"
+#define constchar char
+
+/* NPY_INTP_FMT Note:
+ * Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf,
+ * NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those
+ * functions use different formatting codes that are portably specified
+ * according to the Python documentation. See issue gh-2388.
+ */
+#if NPY_SIZEOF_INTP == NPY_SIZEOF_LONG
+ #define NPY_INTP NPY_LONG
+ #define NPY_UINTP NPY_ULONG
+ #define PyIntpArrType_Type PyLongArrType_Type
+ #define PyUIntpArrType_Type PyULongArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_LONG
+ #define NPY_MIN_INTP NPY_MIN_LONG
+ #define NPY_MAX_UINTP NPY_MAX_ULONG
+ #define NPY_INTP_FMT "ld"
+#elif NPY_SIZEOF_INTP == NPY_SIZEOF_INT
+ #define NPY_INTP NPY_INT
+ #define NPY_UINTP NPY_UINT
+ #define PyIntpArrType_Type PyIntArrType_Type
+ #define PyUIntpArrType_Type PyUIntArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_INT
+ #define NPY_MIN_INTP NPY_MIN_INT
+ #define NPY_MAX_UINTP NPY_MAX_UINT
+ #define NPY_INTP_FMT "d"
+#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_INTP == NPY_SIZEOF_LONGLONG)
+ #define NPY_INTP NPY_LONGLONG
+ #define NPY_UINTP NPY_ULONGLONG
+ #define PyIntpArrType_Type PyLongLongArrType_Type
+ #define PyUIntpArrType_Type PyULongLongArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_LONGLONG
+ #define NPY_MIN_INTP NPY_MIN_LONGLONG
+ #define NPY_MAX_UINTP NPY_MAX_ULONGLONG
+ #define NPY_INTP_FMT "lld"
+#else
+ #error "Failed to correctly define NPY_INTP and NPY_UINTP"
+#endif
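+
+/*
+ * Sketch of the documented use of NPY_INTP_FMT with PyErr_Format
+ * (`axis` and `size` assumed npy_intp values):
+ *
+ *     PyErr_Format(PyExc_ValueError,
+ *                  "axis %" NPY_INTP_FMT " out of bounds for size %" NPY_INTP_FMT,
+ *                  axis, size);
+ */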
+
+
+/*
+ * Some platforms don't define bool, long long, or long double.
+ * Handle that here.
+ */
+#define NPY_BYTE_FMT "hhd"
+#define NPY_UBYTE_FMT "hhu"
+#define NPY_SHORT_FMT "hd"
+#define NPY_USHORT_FMT "hu"
+#define NPY_INT_FMT "d"
+#define NPY_UINT_FMT "u"
+#define NPY_LONG_FMT "ld"
+#define NPY_ULONG_FMT "lu"
+#define NPY_HALF_FMT "g"
+#define NPY_FLOAT_FMT "g"
+#define NPY_DOUBLE_FMT "g"
+
+
+#ifdef PY_LONG_LONG
+typedef PY_LONG_LONG npy_longlong;
+typedef unsigned PY_LONG_LONG npy_ulonglong;
+# ifdef _MSC_VER
+# define NPY_LONGLONG_FMT "I64d"
+# define NPY_ULONGLONG_FMT "I64u"
+# else
+# define NPY_LONGLONG_FMT "lld"
+# define NPY_ULONGLONG_FMT "llu"
+# endif
+# ifdef _MSC_VER
+# define NPY_LONGLONG_SUFFIX(x) (x##i64)
+# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64)
+# else
+# define NPY_LONGLONG_SUFFIX(x) (x##LL)
+# define NPY_ULONGLONG_SUFFIX(x) (x##ULL)
+# endif
+#else
+typedef long npy_longlong;
+typedef unsigned long npy_ulonglong;
+# define NPY_LONGLONG_SUFFIX(x) (x##L)
+# define NPY_ULONGLONG_SUFFIX(x) (x##UL)
+#endif
+
+
+typedef unsigned char npy_bool;
+#define NPY_FALSE 0
+#define NPY_TRUE 1
+/*
+ * `NPY_SIZEOF_LONGDOUBLE` isn't always equal to sizeof(long double).
+ * In certain cases, it may be forced to be equal to sizeof(double),
+ * even against the compiler implementation, and the same goes for
+ * `complex long double`.
+ *
+ * Therefore, avoid `long double`, use `npy_longdouble` instead,
+ * and when it comes to standard math functions make sure of using
+ * the double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`.
+ * For example:
+ * npy_longdouble *ptr, x;
+ * #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ * npy_longdouble r = modf(x, ptr);
+ * #else
+ * npy_longdouble r = modfl(x, ptr);
+ * #endif
+ *
+ * See https://github.com/numpy/numpy/issues/20348
+ */
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ #define NPY_LONGDOUBLE_FMT "g"
+ #define longdouble_t double
+ typedef double npy_longdouble;
+#else
+ #define NPY_LONGDOUBLE_FMT "Lg"
+ #define longdouble_t long double
+ typedef long double npy_longdouble;
+#endif
+
+#ifndef Py_USING_UNICODE
+#error Must use Python with unicode enabled.
+#endif
+
+
+typedef signed char npy_byte;
+typedef unsigned char npy_ubyte;
+typedef unsigned short npy_ushort;
+typedef unsigned int npy_uint;
+typedef unsigned long npy_ulong;
+
+/* These are for completeness */
+typedef char npy_char;
+typedef short npy_short;
+typedef int npy_int;
+typedef long npy_long;
+typedef float npy_float;
+typedef double npy_double;
+
+typedef Py_hash_t npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
+
+#if defined(__cplusplus)
+
+typedef struct
+{
+ double _Val[2];
+} npy_cdouble;
+
+typedef struct
+{
+ float _Val[2];
+} npy_cfloat;
+
+typedef struct
+{
+ long double _Val[2];
+} npy_clongdouble;
+
+#else
+
+#include <complex.h>
+
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+typedef _Dcomplex npy_cdouble;
+typedef _Fcomplex npy_cfloat;
+typedef _Lcomplex npy_clongdouble;
+#else /* !defined(_MSC_VER) || defined(__INTEL_COMPILER) */
+typedef double _Complex npy_cdouble;
+typedef float _Complex npy_cfloat;
+typedef longdouble_t _Complex npy_clongdouble;
+#endif
+
+#endif
+
+/*
+ * numarray-style bit-width typedefs
+ */
+#define NPY_MAX_INT8 127
+#define NPY_MIN_INT8 -128
+#define NPY_MAX_UINT8 255
+#define NPY_MAX_INT16 32767
+#define NPY_MIN_INT16 -32768
+#define NPY_MAX_UINT16 65535
+#define NPY_MAX_INT32 2147483647
+#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)
+#define NPY_MAX_UINT32 4294967295U
+#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)
+#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)
+#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define NPY_MIN_DATETIME NPY_MIN_INT64
+#define NPY_MAX_DATETIME NPY_MAX_INT64
+#define NPY_MIN_TIMEDELTA NPY_MIN_INT64
+#define NPY_MAX_TIMEDELTA NPY_MAX_INT64
+
+ /* Need to find the number of bits for each type and
+ make definitions accordingly.
+
+ C states that sizeof(char) == 1 by definition.
+
+ So, just using the sizeof keyword won't help.
+
+ It also looks like Python itself uses sizeof(char) quite a
+ bit, which by definition should be 1 all the time.
+
+ Idea: Make Use of CHAR_BIT which should tell us how many
+ BITS per CHARACTER
+ */
+
+ /* Include platform definitions -- These are in the C89/90 standard */
+#include <limits.h>
+#define NPY_MAX_BYTE SCHAR_MAX
+#define NPY_MIN_BYTE SCHAR_MIN
+#define NPY_MAX_UBYTE UCHAR_MAX
+#define NPY_MAX_SHORT SHRT_MAX
+#define NPY_MIN_SHORT SHRT_MIN
+#define NPY_MAX_USHORT USHRT_MAX
+#define NPY_MAX_INT INT_MAX
+#ifndef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#endif
+#define NPY_MIN_INT INT_MIN
+#define NPY_MAX_UINT UINT_MAX
+#define NPY_MAX_LONG LONG_MAX
+#define NPY_MIN_LONG LONG_MIN
+#define NPY_MAX_ULONG ULONG_MAX
+
+#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
+#define NPY_BITSOF_CHAR CHAR_BIT
+#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
+#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)
+#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)
+#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)
+#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)
+#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)
+#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)
+#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)
+#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)
+#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)
+#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)
+#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)
+
+#if NPY_BITSOF_LONG == 8
+#define NPY_INT8 NPY_LONG
+#define NPY_UINT8 NPY_ULONG
+ typedef long npy_int8;
+ typedef unsigned long npy_uint8;
+#define PyInt8ScalarObject PyLongScalarObject
+#define PyInt8ArrType_Type PyLongArrType_Type
+#define PyUInt8ScalarObject PyULongScalarObject
+#define PyUInt8ArrType_Type PyULongArrType_Type
+#define NPY_INT8_FMT NPY_LONG_FMT
+#define NPY_UINT8_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 16
+#define NPY_INT16 NPY_LONG
+#define NPY_UINT16 NPY_ULONG
+ typedef long npy_int16;
+ typedef unsigned long npy_uint16;
+#define PyInt16ScalarObject PyLongScalarObject
+#define PyInt16ArrType_Type PyLongArrType_Type
+#define PyUInt16ScalarObject PyULongScalarObject
+#define PyUInt16ArrType_Type PyULongArrType_Type
+#define NPY_INT16_FMT NPY_LONG_FMT
+#define NPY_UINT16_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 32
+#define NPY_INT32 NPY_LONG
+#define NPY_UINT32 NPY_ULONG
+ typedef long npy_int32;
+ typedef unsigned long npy_uint32;
+ typedef unsigned long npy_ucs4;
+#define PyInt32ScalarObject PyLongScalarObject
+#define PyInt32ArrType_Type PyLongArrType_Type
+#define PyUInt32ScalarObject PyULongScalarObject
+#define PyUInt32ArrType_Type PyULongArrType_Type
+#define NPY_INT32_FMT NPY_LONG_FMT
+#define NPY_UINT32_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 64
+#define NPY_INT64 NPY_LONG
+#define NPY_UINT64 NPY_ULONG
+ typedef long npy_int64;
+ typedef unsigned long npy_uint64;
+#define PyInt64ScalarObject PyLongScalarObject
+#define PyInt64ArrType_Type PyLongArrType_Type
+#define PyUInt64ScalarObject PyULongScalarObject
+#define PyUInt64ArrType_Type PyULongArrType_Type
+#define NPY_INT64_FMT NPY_LONG_FMT
+#define NPY_UINT64_FMT NPY_ULONG_FMT
+#define MyPyLong_FromInt64 PyLong_FromLong
+#define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+
+#if NPY_BITSOF_LONGLONG == 8
+# ifndef NPY_INT8
+# define NPY_INT8 NPY_LONGLONG
+# define NPY_UINT8 NPY_ULONGLONG
+ typedef npy_longlong npy_int8;
+ typedef npy_ulonglong npy_uint8;
+# define PyInt8ScalarObject PyLongLongScalarObject
+# define PyInt8ArrType_Type PyLongLongArrType_Type
+# define PyUInt8ScalarObject PyULongLongScalarObject
+# define PyUInt8ArrType_Type PyULongLongArrType_Type
+#define NPY_INT8_FMT NPY_LONGLONG_FMT
+#define NPY_UINT8_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT8
+# define NPY_MIN_LONGLONG NPY_MIN_INT8
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT8
+#elif NPY_BITSOF_LONGLONG == 16
+# ifndef NPY_INT16
+# define NPY_INT16 NPY_LONGLONG
+# define NPY_UINT16 NPY_ULONGLONG
+ typedef npy_longlong npy_int16;
+ typedef npy_ulonglong npy_uint16;
+# define PyInt16ScalarObject PyLongLongScalarObject
+# define PyInt16ArrType_Type PyLongLongArrType_Type
+# define PyUInt16ScalarObject PyULongLongScalarObject
+# define PyUInt16ArrType_Type PyULongLongArrType_Type
+#define NPY_INT16_FMT NPY_LONGLONG_FMT
+#define NPY_UINT16_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT16
+# define NPY_MIN_LONGLONG NPY_MIN_INT16
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT16
+#elif NPY_BITSOF_LONGLONG == 32
+# ifndef NPY_INT32
+# define NPY_INT32 NPY_LONGLONG
+# define NPY_UINT32 NPY_ULONGLONG
+ typedef npy_longlong npy_int32;
+ typedef npy_ulonglong npy_uint32;
+ typedef npy_ulonglong npy_ucs4;
+# define PyInt32ScalarObject PyLongLongScalarObject
+# define PyInt32ArrType_Type PyLongLongArrType_Type
+# define PyUInt32ScalarObject PyULongLongScalarObject
+# define PyUInt32ArrType_Type PyULongLongArrType_Type
+#define NPY_INT32_FMT NPY_LONGLONG_FMT
+#define NPY_UINT32_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT32
+# define NPY_MIN_LONGLONG NPY_MIN_INT32
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT32
+#elif NPY_BITSOF_LONGLONG == 64
+# ifndef NPY_INT64
+# define NPY_INT64 NPY_LONGLONG
+# define NPY_UINT64 NPY_ULONGLONG
+ typedef npy_longlong npy_int64;
+ typedef npy_ulonglong npy_uint64;
+# define PyInt64ScalarObject PyLongLongScalarObject
+# define PyInt64ArrType_Type PyLongLongArrType_Type
+# define PyUInt64ScalarObject PyULongLongScalarObject
+# define PyUInt64ArrType_Type PyULongLongArrType_Type
+#define NPY_INT64_FMT NPY_LONGLONG_FMT
+#define NPY_UINT64_FMT NPY_ULONGLONG_FMT
+# define MyPyLong_FromInt64 PyLong_FromLongLong
+# define MyPyLong_AsInt64 PyLong_AsLongLong
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT64
+# define NPY_MIN_LONGLONG NPY_MIN_INT64
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT64
+#endif
+
+#if NPY_BITSOF_INT == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_INT
+#define NPY_UINT8 NPY_UINT
+ typedef int npy_int8;
+ typedef unsigned int npy_uint8;
+# define PyInt8ScalarObject PyIntScalarObject
+# define PyInt8ArrType_Type PyIntArrType_Type
+# define PyUInt8ScalarObject PyUIntScalarObject
+# define PyUInt8ArrType_Type PyUIntArrType_Type
+#define NPY_INT8_FMT NPY_INT_FMT
+#define NPY_UINT8_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_INT
+#define NPY_UINT16 NPY_UINT
+ typedef int npy_int16;
+ typedef unsigned int npy_uint16;
+# define PyInt16ScalarObject PyIntScalarObject
+# define PyInt16ArrType_Type PyIntArrType_Type
+# define PyUInt16ScalarObject PyUIntScalarObject
+# define PyUInt16ArrType_Type PyUIntArrType_Type
+#define NPY_INT16_FMT NPY_INT_FMT
+#define NPY_UINT16_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_INT
+#define NPY_UINT32 NPY_UINT
+ typedef int npy_int32;
+ typedef unsigned int npy_uint32;
+ typedef unsigned int npy_ucs4;
+# define PyInt32ScalarObject PyIntScalarObject
+# define PyInt32ArrType_Type PyIntArrType_Type
+# define PyUInt32ScalarObject PyUIntScalarObject
+# define PyUInt32ArrType_Type PyUIntArrType_Type
+#define NPY_INT32_FMT NPY_INT_FMT
+#define NPY_UINT32_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_INT
+#define NPY_UINT64 NPY_UINT
+ typedef int npy_int64;
+ typedef unsigned int npy_uint64;
+# define PyInt64ScalarObject PyIntScalarObject
+# define PyInt64ArrType_Type PyIntArrType_Type
+# define PyUInt64ScalarObject PyUIntScalarObject
+# define PyUInt64ArrType_Type PyUIntArrType_Type
+#define NPY_INT64_FMT NPY_INT_FMT
+#define NPY_UINT64_FMT NPY_UINT_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#endif
+
+#if NPY_BITSOF_SHORT == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_SHORT
+#define NPY_UINT8 NPY_USHORT
+ typedef short npy_int8;
+ typedef unsigned short npy_uint8;
+# define PyInt8ScalarObject PyShortScalarObject
+# define PyInt8ArrType_Type PyShortArrType_Type
+# define PyUInt8ScalarObject PyUShortScalarObject
+# define PyUInt8ArrType_Type PyUShortArrType_Type
+#define NPY_INT8_FMT NPY_SHORT_FMT
+#define NPY_UINT8_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_SHORT
+#define NPY_UINT16 NPY_USHORT
+ typedef short npy_int16;
+ typedef unsigned short npy_uint16;
+# define PyInt16ScalarObject PyShortScalarObject
+# define PyInt16ArrType_Type PyShortArrType_Type
+# define PyUInt16ScalarObject PyUShortScalarObject
+# define PyUInt16ArrType_Type PyUShortArrType_Type
+#define NPY_INT16_FMT NPY_SHORT_FMT
+#define NPY_UINT16_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_SHORT
+#define NPY_UINT32 NPY_USHORT
+ typedef short npy_int32;
+ typedef unsigned short npy_uint32;
+ typedef unsigned short npy_ucs4;
+# define PyInt32ScalarObject PyShortScalarObject
+# define PyInt32ArrType_Type PyShortArrType_Type
+# define PyUInt32ScalarObject PyUShortScalarObject
+# define PyUInt32ArrType_Type PyUShortArrType_Type
+#define NPY_INT32_FMT NPY_SHORT_FMT
+#define NPY_UINT32_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_SHORT
+#define NPY_UINT64 NPY_USHORT
+ typedef short npy_int64;
+ typedef unsigned short npy_uint64;
+# define PyInt64ScalarObject PyShortScalarObject
+# define PyInt64ArrType_Type PyShortArrType_Type
+# define PyUInt64ScalarObject PyUShortScalarObject
+# define PyUInt64ArrType_Type PyUShortArrType_Type
+#define NPY_INT64_FMT NPY_SHORT_FMT
+#define NPY_UINT64_FMT NPY_USHORT_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#endif
+
+
+#if NPY_BITSOF_CHAR == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_BYTE
+#define NPY_UINT8 NPY_UBYTE
+ typedef signed char npy_int8;
+ typedef unsigned char npy_uint8;
+# define PyInt8ScalarObject PyByteScalarObject
+# define PyInt8ArrType_Type PyByteArrType_Type
+# define PyUInt8ScalarObject PyUByteScalarObject
+# define PyUInt8ArrType_Type PyUByteArrType_Type
+#define NPY_INT8_FMT NPY_BYTE_FMT
+#define NPY_UINT8_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_BYTE
+#define NPY_UINT16 NPY_UBYTE
+ typedef signed char npy_int16;
+ typedef unsigned char npy_uint16;
+# define PyInt16ScalarObject PyByteScalarObject
+# define PyInt16ArrType_Type PyByteArrType_Type
+# define PyUInt16ScalarObject PyUByteScalarObject
+# define PyUInt16ArrType_Type PyUByteArrType_Type
+#define NPY_INT16_FMT NPY_BYTE_FMT
+#define NPY_UINT16_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_BYTE
+#define NPY_UINT32 NPY_UBYTE
+ typedef signed char npy_int32;
+ typedef unsigned char npy_uint32;
+ typedef unsigned char npy_ucs4;
+# define PyInt32ScalarObject PyByteScalarObject
+# define PyInt32ArrType_Type PyByteArrType_Type
+# define PyUInt32ScalarObject PyUByteScalarObject
+# define PyUInt32ArrType_Type PyUByteArrType_Type
+#define NPY_INT32_FMT NPY_BYTE_FMT
+#define NPY_UINT32_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_BYTE
+#define NPY_UINT64 NPY_UBYTE
+ typedef signed char npy_int64;
+ typedef unsigned char npy_uint64;
+# define PyInt64ScalarObject PyByteScalarObject
+# define PyInt64ArrType_Type PyByteArrType_Type
+# define PyUInt64ScalarObject PyUByteScalarObject
+# define PyUInt64ArrType_Type PyUByteArrType_Type
+#define NPY_INT64_FMT NPY_BYTE_FMT
+#define NPY_UINT64_FMT NPY_UBYTE_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_CHAR == 128
+#endif
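+
+/*
+ * Usage sketch (illustrative): after the cascades above, each bit-width
+ * alias maps to exactly one native type, so portable code can rely on the
+ * alias and its matching printf format macro:
+ *
+ *     #include <stdio.h>
+ *     #include <numpy/npy_common.h>
+ *
+ *     npy_int32 n = 42;                     // 32 bits on every platform
+ *     printf("%" NPY_INT32_FMT "\n", n);
+ */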
+
+
+
+#if NPY_BITSOF_DOUBLE == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_DOUBLE
+#define NPY_COMPLEX64 NPY_CDOUBLE
+ typedef double npy_float32;
+ typedef npy_cdouble npy_complex64;
+# define PyFloat32ScalarObject PyDoubleScalarObject
+# define PyComplex64ScalarObject PyCDoubleScalarObject
+# define PyFloat32ArrType_Type PyDoubleArrType_Type
+# define PyComplex64ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_DOUBLE
+#define NPY_COMPLEX128 NPY_CDOUBLE
+ typedef double npy_float64;
+ typedef npy_cdouble npy_complex128;
+# define PyFloat64ScalarObject PyDoubleScalarObject
+# define PyComplex128ScalarObject PyCDoubleScalarObject
+# define PyFloat64ArrType_Type PyDoubleArrType_Type
+# define PyComplex128ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_DOUBLE
+#define NPY_COMPLEX160 NPY_CDOUBLE
+ typedef double npy_float80;
+ typedef npy_cdouble npy_complex160;
+# define PyFloat80ScalarObject PyDoubleScalarObject
+# define PyComplex160ScalarObject PyCDoubleScalarObject
+# define PyFloat80ArrType_Type PyDoubleArrType_Type
+# define PyComplex160ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_DOUBLE
+#define NPY_COMPLEX192 NPY_CDOUBLE
+ typedef double npy_float96;
+ typedef npy_cdouble npy_complex192;
+# define PyFloat96ScalarObject PyDoubleScalarObject
+# define PyComplex192ScalarObject PyCDoubleScalarObject
+# define PyFloat96ArrType_Type PyDoubleArrType_Type
+# define PyComplex192ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_DOUBLE
+#define NPY_COMPLEX256 NPY_CDOUBLE
+ typedef double npy_float128;
+ typedef npy_cdouble npy_complex256;
+# define PyFloat128ScalarObject PyDoubleScalarObject
+# define PyComplex256ScalarObject PyCDoubleScalarObject
+# define PyFloat128ArrType_Type PyDoubleArrType_Type
+# define PyComplex256ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT
+#endif
+#endif
+
+
+
+#if NPY_BITSOF_FLOAT == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_FLOAT
+#define NPY_COMPLEX64 NPY_CFLOAT
+ typedef float npy_float32;
+ typedef npy_cfloat npy_complex64;
+# define PyFloat32ScalarObject PyFloatScalarObject
+# define PyComplex64ScalarObject PyCFloatScalarObject
+# define PyFloat32ArrType_Type PyFloatArrType_Type
+# define PyComplex64ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT32_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_FLOAT
+#define NPY_COMPLEX128 NPY_CFLOAT
+ typedef float npy_float64;
+ typedef npy_cfloat npy_complex128;
+# define PyFloat64ScalarObject PyFloatScalarObject
+# define PyComplex128ScalarObject PyCFloatScalarObject
+# define PyFloat64ArrType_Type PyFloatArrType_Type
+# define PyComplex128ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT64_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_FLOAT
+#define NPY_COMPLEX160 NPY_CFLOAT
+ typedef float npy_float80;
+ typedef npy_cfloat npy_complex160;
+# define PyFloat80ScalarObject PyFloatScalarObject
+# define PyComplex160ScalarObject PyCFloatScalarObject
+# define PyFloat80ArrType_Type PyFloatArrType_Type
+# define PyComplex160ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT80_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_FLOAT
+#define NPY_COMPLEX192 NPY_CFLOAT
+ typedef float npy_float96;
+ typedef npy_cfloat npy_complex192;
+# define PyFloat96ScalarObject PyFloatScalarObject
+# define PyComplex192ScalarObject PyCFloatScalarObject
+# define PyFloat96ArrType_Type PyFloatArrType_Type
+# define PyComplex192ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT96_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_FLOAT
+#define NPY_COMPLEX256 NPY_CFLOAT
+ typedef float npy_float128;
+ typedef npy_cfloat npy_complex256;
+# define PyFloat128ScalarObject PyFloatScalarObject
+# define PyComplex256ScalarObject PyCFloatScalarObject
+# define PyFloat128ArrType_Type PyFloatArrType_Type
+# define PyComplex256ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT128_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT
+#endif
+#endif
+
+/* half/float16 isn't a floating-point type in C */
+#define NPY_FLOAT16 NPY_HALF
+typedef npy_uint16 npy_half;
+typedef npy_half npy_float16;
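+
+/*
+ * Sketch (illustrative; assumes the conversion helpers from numpy's
+ * halffloat.h): npy_half stores raw IEEE 754 binary16 bits, so it cannot
+ * be used in C arithmetic directly - convert to float first:
+ *
+ *     #include <numpy/halffloat.h>
+ *
+ *     npy_half h = npy_float_to_half(1.5f);
+ *     float f = npy_half_to_float(h);   // back to 1.5f
+ */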
+
+#if NPY_BITSOF_LONGDOUBLE == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_LONGDOUBLE
+#define NPY_COMPLEX64 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float32;
+ typedef npy_clongdouble npy_complex64;
+# define PyFloat32ScalarObject PyLongDoubleScalarObject
+# define PyComplex64ScalarObject PyCLongDoubleScalarObject
+# define PyFloat32ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_LONGDOUBLE
+#define NPY_COMPLEX128 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float64;
+ typedef npy_clongdouble npy_complex128;
+# define PyFloat64ScalarObject PyLongDoubleScalarObject
+# define PyComplex128ScalarObject PyCLongDoubleScalarObject
+# define PyFloat64ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_LONGDOUBLE
+#define NPY_COMPLEX160 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float80;
+ typedef npy_clongdouble npy_complex160;
+# define PyFloat80ScalarObject PyLongDoubleScalarObject
+# define PyComplex160ScalarObject PyCLongDoubleScalarObject
+# define PyFloat80ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_LONGDOUBLE
+#define NPY_COMPLEX192 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float96;
+ typedef npy_clongdouble npy_complex192;
+# define PyFloat96ScalarObject PyLongDoubleScalarObject
+# define PyComplex192ScalarObject PyCLongDoubleScalarObject
+# define PyFloat96ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_LONGDOUBLE
+#define NPY_COMPLEX256 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float128;
+ typedef npy_clongdouble npy_complex256;
+# define PyFloat128ScalarObject PyLongDoubleScalarObject
+# define PyComplex256ScalarObject PyCLongDoubleScalarObject
+# define PyFloat128ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#endif
+
+/* datetime typedefs */
+typedef npy_int64 npy_timedelta;
+typedef npy_int64 npy_datetime;
+#define NPY_DATETIME_FMT NPY_INT64_FMT
+#define NPY_TIMEDELTA_FMT NPY_INT64_FMT
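+
+/*
+ * Sketch (illustrative): both typedefs are 64-bit integer counts whose
+ * unit (seconds, nanoseconds, ...) lives in the array metadata, so the
+ * raw value prints with the int64 format macro:
+ *
+ *     #include <stdio.h>
+ *
+ *     npy_datetime d = 0;                  // the epoch, in the active unit
+ *     printf("%" NPY_DATETIME_FMT "\n", d);
+ */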
+
+/* End of typedefs for numarray style bit-width names */
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_cpu.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_cpu.h
new file mode 100644
index 00000000..91cf2d82
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_cpu.h
@@ -0,0 +1,124 @@
+/*
+ * This header sets (target) CPU specific macros.
+ * - Possible values:
+ * NPY_CPU_X86
+ * NPY_CPU_AMD64
+ * NPY_CPU_PPC
+ * NPY_CPU_PPC64
+ * NPY_CPU_PPC64LE
+ * NPY_CPU_SPARC
+ * NPY_CPU_S390
+ * NPY_CPU_IA64
+ * NPY_CPU_HPPA
+ * NPY_CPU_ALPHA
+ * NPY_CPU_ARMEL
+ * NPY_CPU_ARMEB
+ * NPY_CPU_SH_LE
+ * NPY_CPU_SH_BE
+ * NPY_CPU_ARCEL
+ * NPY_CPU_ARCEB
+ * NPY_CPU_RISCV64
+ * NPY_CPU_RISCV32
+ * NPY_CPU_LOONGARCH
+ * NPY_CPU_WASM
+ */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
+
+#include "numpyconfig.h"
+
+#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
+ /*
+ * __i386__ is defined by gcc and Intel compiler on Linux,
+ * _M_IX86 by VS compiler,
+ * i386 by Sun compilers on opensolaris at least
+ */
+ #define NPY_CPU_X86
+#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
+ /*
+ * both __x86_64__ and __amd64__ are defined by gcc
+ * __x86_64 defined by sun compiler on opensolaris at least
+ * _M_AMD64 defined by MS compiler
+ */
+ #define NPY_CPU_AMD64
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_PPC64LE
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_PPC64
+#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
+ /*
+     * __ppc__ is defined by gcc; __powerpc__ has also been observed,
+     * though we could not confirm which compiler defines it.
+ * _ARCH_PPC is used by at least gcc on AIX
+ * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check
+ * for those specifically first before defaulting to ppc
+ */
+ #define NPY_CPU_PPC
+#elif defined(__sparc__) || defined(__sparc)
+ /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
+ #define NPY_CPU_SPARC
+#elif defined(__s390__)
+ #define NPY_CPU_S390
+#elif defined(__ia64)
+ #define NPY_CPU_IA64
+#elif defined(__hppa)
+ #define NPY_CPU_HPPA
+#elif defined(__alpha__)
+ #define NPY_CPU_ALPHA
+#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)
+ /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */
+ #if defined(__ARMEB__) || defined(__AARCH64EB__)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH32
+ #elif defined(__ARM_64BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH64
+ #else
+ #define NPY_CPU_ARMEB
+ #endif
+ #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEL_AARCH32
+ #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
+ #define NPY_CPU_ARMEL_AARCH64
+ #else
+ #define NPY_CPU_ARMEL
+ #endif
+ #else
+ # error Unknown ARM CPU, please report this to numpy maintainers with \
+ information about your platform (OS, CPU and compiler)
+ #endif
+#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_SH_LE
+#elif defined(__sh__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_SH_BE
+#elif defined(__MIPSEL__)
+ #define NPY_CPU_MIPSEL
+#elif defined(__MIPSEB__)
+ #define NPY_CPU_MIPSEB
+#elif defined(__or1k__)
+ #define NPY_CPU_OR1K
+#elif defined(__mc68000__)
+ #define NPY_CPU_M68K
+#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_ARCEL
+#elif defined(__arc__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_ARCEB
+#elif defined(__riscv)
+ #if __riscv_xlen == 64
+ #define NPY_CPU_RISCV64
+ #elif __riscv_xlen == 32
+ #define NPY_CPU_RISCV32
+ #endif
+#elif defined(__loongarch_lp64)
+ #define NPY_CPU_LOONGARCH64
+#elif defined(__EMSCRIPTEN__)
+ /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
+ #define NPY_CPU_WASM
+#else
+ #error Unknown CPU, please report this to numpy maintainers with \
+ information about your platform (OS, CPU and compiler)
+#endif
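+
+/*
+ * Usage sketch (illustrative): exactly one NPY_CPU_* macro is defined once
+ * this cascade has run, so architecture-specific code can branch on it:
+ *
+ *     #include <numpy/npy_cpu.h>
+ *
+ *     #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86)
+ *         // x86-specific path
+ *     #else
+ *         // generic fallback
+ *     #endif
+ */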
+
+#define NPY_ALIGNMENT_REQUIRED 1
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_endian.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_endian.h
new file mode 100644
index 00000000..09262120
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_endian.h
@@ -0,0 +1,78 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
+
+/*
+ * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
+ * endian.h
+ */
+
+#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H)
+ /* Use endian.h if available */
+
+ #if defined(NPY_HAVE_ENDIAN_H)
+        #include <endian.h>
+ #elif defined(NPY_HAVE_SYS_ENDIAN_H)
+        #include <sys/endian.h>
+ #endif
+
+ #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN BIG_ENDIAN
+ #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER _BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN _BIG_ENDIAN
+ #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER __BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN __BIG_ENDIAN
+ #endif
+#endif
+
+#ifndef NPY_BYTE_ORDER
+ /* Set endianness info using target CPU */
+ #include "npy_cpu.h"
+
+ #define NPY_LITTLE_ENDIAN 1234
+ #define NPY_BIG_ENDIAN 4321
+
+ #if defined(NPY_CPU_X86) \
+ || defined(NPY_CPU_AMD64) \
+ || defined(NPY_CPU_IA64) \
+ || defined(NPY_CPU_ALPHA) \
+ || defined(NPY_CPU_ARMEL) \
+ || defined(NPY_CPU_ARMEL_AARCH32) \
+ || defined(NPY_CPU_ARMEL_AARCH64) \
+ || defined(NPY_CPU_SH_LE) \
+ || defined(NPY_CPU_MIPSEL) \
+ || defined(NPY_CPU_PPC64LE) \
+ || defined(NPY_CPU_ARCEL) \
+ || defined(NPY_CPU_RISCV64) \
+ || defined(NPY_CPU_RISCV32) \
+ || defined(NPY_CPU_LOONGARCH) \
+ || defined(NPY_CPU_WASM)
+ #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
+
+ #elif defined(NPY_CPU_PPC) \
+ || defined(NPY_CPU_SPARC) \
+ || defined(NPY_CPU_S390) \
+ || defined(NPY_CPU_HPPA) \
+ || defined(NPY_CPU_PPC64) \
+ || defined(NPY_CPU_ARMEB) \
+ || defined(NPY_CPU_ARMEB_AARCH32) \
+ || defined(NPY_CPU_ARMEB_AARCH64) \
+ || defined(NPY_CPU_SH_BE) \
+ || defined(NPY_CPU_MIPSEB) \
+ || defined(NPY_CPU_OR1K) \
+ || defined(NPY_CPU_M68K) \
+ || defined(NPY_CPU_ARCEB)
+ #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
+
+ #else
+ #error Unknown CPU: can not set endianness
+ #endif
+
+#endif
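+
+/*
+ * Usage sketch (illustrative): NPY_BYTE_ORDER compares against the two
+ * sentinel values in ordinary preprocessor conditionals:
+ *
+ *     #include <numpy/npy_endian.h>
+ *
+ *     #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+ *         // bytes can be copied straight into little-endian wire format
+ *     #else
+ *         // byte-swap before serializing
+ *     #endif
+ */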
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_math.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_math.h
new file mode 100644
index 00000000..abc784bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_math.h
@@ -0,0 +1,602 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
+
+#include <numpy/npy_common.h>
+
+#include <math.h>
+
+/* By adding static inline specifiers to npy_math function definitions when
+   appropriate, the compiler is given the opportunity to optimize. */
+#if NPY_INLINE_MATH
+#define NPY_INPLACE static inline
+#else
+#define NPY_INPLACE
+#endif
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PyArray_MAX(a,b) (((a)>(b))?(a):(b))
+#define PyArray_MIN(a,b) (((a)<(b))?(a):(b))
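+
+/*
+ * Caveat (illustrative): as plain macros these evaluate each argument
+ * twice, so side-effecting arguments are unsafe:
+ *
+ *     int hi = PyArray_MAX(a, b);     // fine
+ *     int ub = PyArray_MAX(i++, j);   // i may be incremented twice
+ */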
+
+/*
+ * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99
+ * for INFINITY)
+ *
+ * XXX: I should test whether INFINITY and NAN are available on the platform
+ */
+static inline float __npy_inff(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
+ return __bint.__f;
+}
+
+static inline float __npy_nanf(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
+ return __bint.__f;
+}
+
+static inline float __npy_pzerof(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
+ return __bint.__f;
+}
+
+static inline float __npy_nzerof(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
+ return __bint.__f;
+}
+
+#define NPY_INFINITYF __npy_inff()
+#define NPY_NANF __npy_nanf()
+#define NPY_PZEROF __npy_pzerof()
+#define NPY_NZEROF __npy_nzerof()
+
+#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
+#define NPY_NAN ((npy_double)NPY_NANF)
+#define NPY_PZERO ((npy_double)NPY_PZEROF)
+#define NPY_NZERO ((npy_double)NPY_NZEROF)
+
+#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
+#define NPY_NANL ((npy_longdouble)NPY_NANF)
+#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)
+#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)
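+
+/*
+ * Sketch (illustrative): the unions above produce quiet-NaN, infinity and
+ * signed-zero constants without relying on the platform's NAN/INFINITY
+ * macros:
+ *
+ *     #include <assert.h>
+ *
+ *     assert(npy_isnan(NPY_NAN));
+ *     assert(npy_isinf(NPY_INFINITY));
+ *     assert(npy_signbit(NPY_NZERO));
+ */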
+
+/*
+ * Useful constants
+ */
+#define NPY_E 2.718281828459045235360287471352662498 /* e */
+#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */
+#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */
+#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */
+#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */
+#define NPY_PI 3.141592653589793238462643383279502884 /* pi */
+#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */
+#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */
+#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */
+#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */
+#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */
+#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */
+#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */
+
+#define NPY_Ef 2.718281828459045235360287471352662498F /* e */
+#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */
+#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */
+#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */
+#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */
+#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */
+#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */
+#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */
+#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */
+#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */
+#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */
+#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */
+#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */
+
+#define NPY_El 2.718281828459045235360287471352662498L /* e */
+#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */
+#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */
+#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */
+#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */
+#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */
+#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */
+#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */
+#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */
+#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */
+#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */
+#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
+#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
+
+/*
+ * Integer functions.
+ */
+NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);
+
+NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
+
+NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a);
+NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a);
+NPY_INPLACE uint8_t npy_popcountu(npy_uint a);
+NPY_INPLACE uint8_t npy_popcountul(npy_ulong a);
+NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a);
+NPY_INPLACE uint8_t npy_popcounthh(npy_byte a);
+NPY_INPLACE uint8_t npy_popcounth(npy_short a);
+NPY_INPLACE uint8_t npy_popcount(npy_int a);
+NPY_INPLACE uint8_t npy_popcountl(npy_long a);
+NPY_INPLACE uint8_t npy_popcountll(npy_longlong a);
+
+/*
+ * C99 double math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE double npy_sin(double x);
+NPY_INPLACE double npy_cos(double x);
+NPY_INPLACE double npy_tan(double x);
+NPY_INPLACE double npy_hypot(double x, double y);
+NPY_INPLACE double npy_log2(double x);
+NPY_INPLACE double npy_atan2(double x, double y);
+
+/* Mandatory C99 double math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+#define npy_sinh sinh
+#define npy_cosh cosh
+#define npy_tanh tanh
+#define npy_asin asin
+#define npy_acos acos
+#define npy_atan atan
+#define npy_log log
+#define npy_log10 log10
+#define npy_cbrt cbrt
+#define npy_fabs fabs
+#define npy_ceil ceil
+#define npy_fmod fmod
+#define npy_floor floor
+#define npy_expm1 expm1
+#define npy_log1p log1p
+#define npy_acosh acosh
+#define npy_asinh asinh
+#define npy_atanh atanh
+#define npy_rint rint
+#define npy_trunc trunc
+#define npy_exp2 exp2
+#define npy_frexp frexp
+#define npy_ldexp ldexp
+#define npy_copysign copysign
+#define npy_exp exp
+#define npy_sqrt sqrt
+#define npy_pow pow
+#define npy_modf modf
+#define npy_nextafter nextafter
+
+double npy_spacing(double x);
+
+/*
+ * IEEE 754 fpu handling
+ */
+
+/* Use builtins to avoid function calls in tight loops.
+ * Only available if npy_config.h is available (= numpy's own build). */
+#ifdef HAVE___BUILTIN_ISNAN
+ #define npy_isnan(x) __builtin_isnan(x)
+#else
+ #define npy_isnan(x) isnan(x)
+#endif
+
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#ifdef HAVE___BUILTIN_ISFINITE
+ #define npy_isfinite(x) __builtin_isfinite(x)
+#else
+ #define npy_isfinite(x) isfinite((x))
+#endif
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#ifdef HAVE___BUILTIN_ISINF
+ #define npy_isinf(x) __builtin_isinf(x)
+#else
+ #define npy_isinf(x) isinf((x))
+#endif
+
+#define npy_signbit(x) signbit((x))
+
+/*
+ * float C99 math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE float npy_sinf(float x);
+NPY_INPLACE float npy_cosf(float x);
+NPY_INPLACE float npy_tanf(float x);
+NPY_INPLACE float npy_expf(float x);
+NPY_INPLACE float npy_sqrtf(float x);
+NPY_INPLACE float npy_hypotf(float x, float y);
+NPY_INPLACE float npy_log2f(float x);
+NPY_INPLACE float npy_atan2f(float x, float y);
+NPY_INPLACE float npy_powf(float x, float y);
+NPY_INPLACE float npy_modff(float x, float* y);
+
+/* Mandatory C99 float math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+
+#define npy_sinhf sinhf
+#define npy_coshf coshf
+#define npy_tanhf tanhf
+#define npy_asinf asinf
+#define npy_acosf acosf
+#define npy_atanf atanf
+#define npy_logf logf
+#define npy_log10f log10f
+#define npy_cbrtf cbrtf
+#define npy_fabsf fabsf
+#define npy_ceilf ceilf
+#define npy_fmodf fmodf
+#define npy_floorf floorf
+#define npy_expm1f expm1f
+#define npy_log1pf log1pf
+#define npy_asinhf asinhf
+#define npy_acoshf acoshf
+#define npy_atanhf atanhf
+#define npy_rintf rintf
+#define npy_truncf truncf
+#define npy_exp2f exp2f
+#define npy_frexpf frexpf
+#define npy_ldexpf ldexpf
+#define npy_copysignf copysignf
+#define npy_nextafterf nextafterf
+
+float npy_spacingf(float x);
+
+/*
+ * long double C99 math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
+
+/* Mandatory C99 long double math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+#define npy_sinhl sinhl
+#define npy_coshl coshl
+#define npy_tanhl tanhl
+#define npy_fabsl fabsl
+#define npy_floorl floorl
+#define npy_ceill ceill
+#define npy_rintl rintl
+#define npy_truncl truncl
+#define npy_cbrtl cbrtl
+#define npy_log10l log10l
+#define npy_logl logl
+#define npy_expm1l expm1l
+#define npy_asinl asinl
+#define npy_acosl acosl
+#define npy_atanl atanl
+#define npy_asinhl asinhl
+#define npy_acoshl acoshl
+#define npy_atanhl atanhl
+#define npy_log1pl log1pl
+#define npy_exp2l exp2l
+#define npy_fmodl fmodl
+#define npy_frexpl frexpl
+#define npy_ldexpl ldexpl
+#define npy_copysignl copysignl
+#define npy_nextafterl nextafterl
+
+npy_longdouble npy_spacingl(npy_longdouble x);
+
+/*
+ * Non standard functions
+ */
+NPY_INPLACE double npy_deg2rad(double x);
+NPY_INPLACE double npy_rad2deg(double x);
+NPY_INPLACE double npy_logaddexp(double x, double y);
+NPY_INPLACE double npy_logaddexp2(double x, double y);
+NPY_INPLACE double npy_divmod(double x, double y, double *modulus);
+NPY_INPLACE double npy_heaviside(double x, double h0);
+
+NPY_INPLACE float npy_deg2radf(float x);
+NPY_INPLACE float npy_rad2degf(float x);
+NPY_INPLACE float npy_logaddexpf(float x, float y);
+NPY_INPLACE float npy_logaddexp2f(float x, float y);
+NPY_INPLACE float npy_divmodf(float x, float y, float *modulus);
+NPY_INPLACE float npy_heavisidef(float x, float h0);
+
+NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y,
+ npy_longdouble *modulus);
+NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
+
+#define npy_degrees npy_rad2deg
+#define npy_degreesf npy_rad2degf
+#define npy_degreesl npy_rad2degl
+
+#define npy_radians npy_deg2rad
+#define npy_radiansf npy_deg2radf
+#define npy_radiansl npy_deg2radl
+
+/*
+ * Complex declarations
+ */
+
+static inline double npy_creal(const npy_cdouble z)
+{
+#if defined(__cplusplus)
+ return z._Val[0];
+#else
+ return creal(z);
+#endif
+}
+
+static inline void npy_csetreal(npy_cdouble *z, const double r)
+{
+ ((double *) z)[0] = r;
+}
+
+static inline double npy_cimag(const npy_cdouble z)
+{
+#if defined(__cplusplus)
+ return z._Val[1];
+#else
+ return cimag(z);
+#endif
+}
+
+static inline void npy_csetimag(npy_cdouble *z, const double i)
+{
+ ((double *) z)[1] = i;
+}
+
+static inline float npy_crealf(const npy_cfloat z)
+{
+#if defined(__cplusplus)
+ return z._Val[0];
+#else
+ return crealf(z);
+#endif
+}
+
+static inline void npy_csetrealf(npy_cfloat *z, const float r)
+{
+ ((float *) z)[0] = r;
+}
+
+static inline float npy_cimagf(const npy_cfloat z)
+{
+#if defined(__cplusplus)
+ return z._Val[1];
+#else
+ return cimagf(z);
+#endif
+}
+
+static inline void npy_csetimagf(npy_cfloat *z, const float i)
+{
+ ((float *) z)[1] = i;
+}
+
+static inline npy_longdouble npy_creall(const npy_clongdouble z)
+{
+#if defined(__cplusplus)
+ return (npy_longdouble)z._Val[0];
+#else
+ return creall(z);
+#endif
+}
+
+static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r)
+{
+ ((longdouble_t *) z)[0] = r;
+}
+
+static inline npy_longdouble npy_cimagl(const npy_clongdouble z)
+{
+#if defined(__cplusplus)
+ return (npy_longdouble)z._Val[1];
+#else
+ return cimagl(z);
+#endif
+}
+
+static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i)
+{
+ ((longdouble_t *) z)[1] = i;
+}
+
+#define NPY_CSETREAL(z, r) npy_csetreal(z, r)
+#define NPY_CSETIMAG(z, i) npy_csetimag(z, i)
+#define NPY_CSETREALF(z, r) npy_csetrealf(z, r)
+#define NPY_CSETIMAGF(z, i) npy_csetimagf(z, i)
+#define NPY_CSETREALL(z, r) npy_csetreall(z, r)
+#define NPY_CSETIMAGL(z, i) npy_csetimagl(z, i)
+
+static inline npy_cdouble npy_cpack(double x, double y)
+{
+ npy_cdouble z;
+ npy_csetreal(&z, x);
+ npy_csetimag(&z, y);
+ return z;
+}
+
+static inline npy_cfloat npy_cpackf(float x, float y)
+{
+ npy_cfloat z;
+ npy_csetrealf(&z, x);
+ npy_csetimagf(&z, y);
+ return z;
+}
+
+static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
+{
+ npy_clongdouble z;
+ npy_csetreall(&z, x);
+ npy_csetimagl(&z, y);
+ return z;
+}
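+
+/*
+ * Usage sketch (illustrative): the pack/accessor helpers keep user code
+ * independent of the underlying complex layout (C99 _Complex vs. the
+ * MSVC struct representation):
+ *
+ *     npy_cdouble z = npy_cpack(3.0, -4.0);
+ *     double re  = npy_creal(z);   // 3.0
+ *     double mag = npy_cabs(z);    // 5.0
+ */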
+
+/*
+ * Double precision complex functions
+ */
+double npy_cabs(npy_cdouble z);
+double npy_carg(npy_cdouble z);
+
+npy_cdouble npy_cexp(npy_cdouble z);
+npy_cdouble npy_clog(npy_cdouble z);
+npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);
+
+npy_cdouble npy_csqrt(npy_cdouble z);
+
+npy_cdouble npy_ccos(npy_cdouble z);
+npy_cdouble npy_csin(npy_cdouble z);
+npy_cdouble npy_ctan(npy_cdouble z);
+
+npy_cdouble npy_ccosh(npy_cdouble z);
+npy_cdouble npy_csinh(npy_cdouble z);
+npy_cdouble npy_ctanh(npy_cdouble z);
+
+npy_cdouble npy_cacos(npy_cdouble z);
+npy_cdouble npy_casin(npy_cdouble z);
+npy_cdouble npy_catan(npy_cdouble z);
+
+npy_cdouble npy_cacosh(npy_cdouble z);
+npy_cdouble npy_casinh(npy_cdouble z);
+npy_cdouble npy_catanh(npy_cdouble z);
+
+/*
+ * Single precision complex functions
+ */
+float npy_cabsf(npy_cfloat z);
+float npy_cargf(npy_cfloat z);
+
+npy_cfloat npy_cexpf(npy_cfloat z);
+npy_cfloat npy_clogf(npy_cfloat z);
+npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);
+
+npy_cfloat npy_csqrtf(npy_cfloat z);
+
+npy_cfloat npy_ccosf(npy_cfloat z);
+npy_cfloat npy_csinf(npy_cfloat z);
+npy_cfloat npy_ctanf(npy_cfloat z);
+
+npy_cfloat npy_ccoshf(npy_cfloat z);
+npy_cfloat npy_csinhf(npy_cfloat z);
+npy_cfloat npy_ctanhf(npy_cfloat z);
+
+npy_cfloat npy_cacosf(npy_cfloat z);
+npy_cfloat npy_casinf(npy_cfloat z);
+npy_cfloat npy_catanf(npy_cfloat z);
+
+npy_cfloat npy_cacoshf(npy_cfloat z);
+npy_cfloat npy_casinhf(npy_cfloat z);
+npy_cfloat npy_catanhf(npy_cfloat z);
+
+
+/*
+ * Extended precision complex functions
+ */
+npy_longdouble npy_cabsl(npy_clongdouble z);
+npy_longdouble npy_cargl(npy_clongdouble z);
+
+npy_clongdouble npy_cexpl(npy_clongdouble z);
+npy_clongdouble npy_clogl(npy_clongdouble z);
+npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y);
+
+npy_clongdouble npy_csqrtl(npy_clongdouble z);
+
+npy_clongdouble npy_ccosl(npy_clongdouble z);
+npy_clongdouble npy_csinl(npy_clongdouble z);
+npy_clongdouble npy_ctanl(npy_clongdouble z);
+
+npy_clongdouble npy_ccoshl(npy_clongdouble z);
+npy_clongdouble npy_csinhl(npy_clongdouble z);
+npy_clongdouble npy_ctanhl(npy_clongdouble z);
+
+npy_clongdouble npy_cacosl(npy_clongdouble z);
+npy_clongdouble npy_casinl(npy_clongdouble z);
+npy_clongdouble npy_catanl(npy_clongdouble z);
+
+npy_clongdouble npy_cacoshl(npy_clongdouble z);
+npy_clongdouble npy_casinhl(npy_clongdouble z);
+npy_clongdouble npy_catanhl(npy_clongdouble z);
+
+
+/*
+ * Functions that set the floating point error
+ * status word.
+ */
+
+/*
+ * platform-dependent code translates floating point
+ * status to an integer sum of these values
+ */
+#define NPY_FPE_DIVIDEBYZERO 1
+#define NPY_FPE_OVERFLOW 2
+#define NPY_FPE_UNDERFLOW 4
+#define NPY_FPE_INVALID 8
+
+int npy_clear_floatstatus_barrier(char*);
+int npy_get_floatstatus_barrier(char*);
+/*
+ * Use caution with these - clang and gcc 8.1 are known to reorder calls
+ * to this form of the function, which can defeat the check. The _barrier
+ * form of the call is preferable; its argument should be
+ * (char*)&local_variable.
+ */
+int npy_clear_floatstatus(void);
+int npy_get_floatstatus(void);
+
+void npy_set_floatstatus_divbyzero(void);
+void npy_set_floatstatus_overflow(void);
+void npy_set_floatstatus_underflow(void);
+void npy_set_floatstatus_invalid(void);
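+
+/*
+ * Usage sketch (illustrative): pass the address of a local variable as the
+ * barrier argument so the compiler cannot hoist the status read above the
+ * computation being checked:
+ *
+ *     char barrier;
+ *     npy_clear_floatstatus_barrier(&barrier);
+ *     double y = 1.0 / x;   // x may be zero
+ *     if (npy_get_floatstatus_barrier(&barrier) & NPY_FPE_DIVIDEBYZERO) {
+ *         // handle division by zero
+ *     }
+ */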
+
+#ifdef __cplusplus
+}
+#endif
+
+#if NPY_INLINE_MATH
+#include "npy_math_internal.h"
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_no_deprecated_api.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_no_deprecated_api.h
new file mode 100644
index 00000000..39658c0b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_no_deprecated_api.h
@@ -0,0 +1,20 @@
+/*
+ * This include file is provided for inclusion in Cython *.pyx files where
+ * one would like to define the NPY_NO_DEPRECATED_API macro. It can be
+ * included by
+ *
+ * cdef extern from "npy_no_deprecated_api.h": pass
+ *
+ */
+#ifndef NPY_NO_DEPRECATED_API
+
+/* put this check here since there may be multiple includes in C extensions. */
+#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \
+ defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \
+ defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_)
+#error "npy_no_deprecated_api.h" must be first among numpy includes.
+#else
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#endif
+
+#endif /* NPY_NO_DEPRECATED_API */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_os.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_os.h
new file mode 100644
index 00000000..0ce5d78b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/npy_os.h
@@ -0,0 +1,42 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
+
+#if defined(linux) || defined(__linux) || defined(__linux__)
+ #define NPY_OS_LINUX
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__OpenBSD__) || defined(__DragonFly__)
+ #define NPY_OS_BSD
+ #ifdef __FreeBSD__
+ #define NPY_OS_FREEBSD
+ #elif defined(__NetBSD__)
+ #define NPY_OS_NETBSD
+ #elif defined(__OpenBSD__)
+ #define NPY_OS_OPENBSD
+ #elif defined(__DragonFly__)
+ #define NPY_OS_DRAGONFLY
+ #endif
+#elif defined(sun) || defined(__sun)
+ #define NPY_OS_SOLARIS
+#elif defined(__CYGWIN__)
+ #define NPY_OS_CYGWIN
+/* We are on Windows.*/
+#elif defined(_WIN32)
+ /* We are using MinGW (64-bit or 32-bit)*/
+ #if defined(__MINGW32__) || defined(__MINGW64__)
+ #define NPY_OS_MINGW
+ /* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/
+ #elif defined(_WIN64)
+ #define NPY_OS_WIN64
+ /* Otherwise assume we are targeting 32-bit Windows*/
+ #else
+ #define NPY_OS_WIN32
+ #endif
+#elif defined(__APPLE__)
+ #define NPY_OS_DARWIN
+#elif defined(__HAIKU__)
+ #define NPY_OS_HAIKU
+#else
+ #define NPY_OS_UNKNOWN
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/numpyconfig.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/numpyconfig.h
new file mode 100644
index 00000000..ba44c28b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/numpyconfig.h
@@ -0,0 +1,182 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
+
+#include "_numpyconfig.h"
+
+/*
+ * On Mac OS X, because there is only one configuration stage for all the archs
+ * in universal builds, any macro which depends on the arch needs to be
+ * hardcoded.
+ *
+ * Note that distutils/pip will attempt a universal2 build when Python itself
+ * is built as universal2, hence this hardcoding is needed even if we do not
+ * support universal2 wheels anymore (see gh-22796).
+ * This code block can be removed after we have dropped the setup.py based
+ * build completely.
+ */
+#ifdef __APPLE__
+ #undef NPY_SIZEOF_LONG
+
+ #ifdef __LP64__
+ #define NPY_SIZEOF_LONG 8
+ #else
+ #define NPY_SIZEOF_LONG 4
+ #endif
+
+ #undef NPY_SIZEOF_LONGDOUBLE
+ #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE
+ #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE
+ #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE
+ #endif
+ #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
+ #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
+ #endif
+
+ #if defined(__arm64__)
+ #define NPY_SIZEOF_LONGDOUBLE 8
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
+ #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1
+ #elif defined(__x86_64)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+ #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1
+ #elif defined (__i386)
+ #define NPY_SIZEOF_LONGDOUBLE 12
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24
+ #elif defined(__ppc__) || defined (__ppc64__)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+ #else
+ #error "unknown architecture"
+ #endif
+#endif
+
+
+/**
+ * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro,
+ * we include API version numbers for specific versions of NumPy.
+ * To exclude all API that was deprecated as of 1.7, add the following before
+ * #including any NumPy headers:
+ * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ * The same is true for NPY_TARGET_VERSION, although NumPy will default to
+ * a backwards compatible build anyway.
+ */
+#define NPY_1_7_API_VERSION 0x00000007
+#define NPY_1_8_API_VERSION 0x00000008
+#define NPY_1_9_API_VERSION 0x00000009
+#define NPY_1_10_API_VERSION 0x0000000a
+#define NPY_1_11_API_VERSION 0x0000000a
+#define NPY_1_12_API_VERSION 0x0000000a
+#define NPY_1_13_API_VERSION 0x0000000b
+#define NPY_1_14_API_VERSION 0x0000000c
+#define NPY_1_15_API_VERSION 0x0000000c
+#define NPY_1_16_API_VERSION 0x0000000d
+#define NPY_1_17_API_VERSION 0x0000000d
+#define NPY_1_18_API_VERSION 0x0000000d
+#define NPY_1_19_API_VERSION 0x0000000d
+#define NPY_1_20_API_VERSION 0x0000000e
+#define NPY_1_21_API_VERSION 0x0000000e
+#define NPY_1_22_API_VERSION 0x0000000f
+#define NPY_1_23_API_VERSION 0x00000010
+#define NPY_1_24_API_VERSION 0x00000010
+#define NPY_1_25_API_VERSION 0x00000011
+#define NPY_2_0_API_VERSION 0x00000012
+#define NPY_2_1_API_VERSION 0x00000013
+#define NPY_2_2_API_VERSION 0x00000013
+#define NPY_2_3_API_VERSION 0x00000014
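+
+/*
+ * Usage sketch (illustrative): an extension that wants to stay loadable on
+ * NumPy >= 1.22 while building against these headers would define, before
+ * including any NumPy header:
+ *
+ *     #define NPY_TARGET_VERSION NPY_1_22_API_VERSION
+ *     #define NPY_NO_DEPRECATED_API NPY_1_22_API_VERSION
+ *     #include <numpy/ndarrayobject.h>
+ */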
+
+
+/*
+ * Binary compatibility version number. This number is increased
+ * whenever the C-API is changed such that binary compatibility is
+ * broken, i.e. whenever a recompile of extension modules is needed.
+ */
+#define NPY_VERSION NPY_ABI_VERSION
+
+/*
+ * Minor API version we are compiling to be compatible with. The version
+ * number is always increased when the API changes, via `NPY_API_VERSION`
+ * (and should maybe just track the NumPy version).
+ *
+ * If we have an internal build, we always target the current version of
+ * course.
+ *
+ * For downstream users, we default to an older version to provide them with
+ * maximum compatibility by default. Downstream can choose to extend that
+ * default, or narrow it down if they wish to use newer API. If you adjust
+ * this, consider the Python version support (example for 1.25.x):
+ *
+ * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12)
+ * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9
+ * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8
+ * NumPy 1.15.x supports Python: ... 3.6 3.7
+ *
+ * Users of the stable ABI may wish to target the last Python that is not
+ * end of life. This would be 3.8 at NumPy 1.25 release time.
+ * 1.17 as default was the choice of oldest-support-numpy at the time and
+ * has in practice no limit (compared to 1.19). Even earlier becomes legacy.
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+ /* NumPy internal build, always use current version. */
+ #define NPY_FEATURE_VERSION NPY_API_VERSION
+#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION
+ /* user provided a target version, use it */
+ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION
+#else
+ /* Use the default (increase when dropping Python 3.11 support) */
+ #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION
+#endif
+
+/* Sanity check the (requested) feature version */
+#if NPY_FEATURE_VERSION > NPY_API_VERSION
+ #error "NPY_TARGET_VERSION higher than NumPy headers!"
+#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION
+ /* No support for irrelevant old targets, no need for error, but warn. */
+ #ifndef _MSC_VER
+ #warning "Requested NumPy target lower than supported NumPy 1.15."
+ #else
+ #define _WARN___STR2__(x) #x
+ #define _WARN___STR1__(x) _WARN___STR2__(x)
+ #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
+ #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.")
+ #endif
+#endif
+
+/*
+ * We define a human readable translation to the Python version of NumPy
+ * for error messages (and also to allow grepping the binaries for conda).
+ */
+#if NPY_FEATURE_VERSION == NPY_1_7_API_VERSION
+ #define NPY_FEATURE_VERSION_STRING "1.7"
+#elif NPY_FEATURE_VERSION == NPY_1_8_API_VERSION
+ #define NPY_FEATURE_VERSION_STRING "1.8"
+#elif NPY_FEATURE_VERSION == NPY_1_9_API_VERSION
+ #define NPY_FEATURE_VERSION_STRING "1.9"
+#elif NPY_FEATURE_VERSION == NPY_1_10_API_VERSION /* also 1.11, 1.12 */
+ #define NPY_FEATURE_VERSION_STRING "1.10"
+#elif NPY_FEATURE_VERSION == NPY_1_13_API_VERSION
+ #define NPY_FEATURE_VERSION_STRING "1.13"
+#elif NPY_FEATURE_VERSION == NPY_1_14_API_VERSION /* also 1.15 */
+ #define NPY_FEATURE_VERSION_STRING "1.14"
+#elif NPY_FEATURE_VERSION == NPY_1_16_API_VERSION /* also 1.17, 1.18, 1.19 */
+ #define NPY_FEATURE_VERSION_STRING "1.16"
+#elif NPY_FEATURE_VERSION == NPY_1_20_API_VERSION /* also 1.21 */
+ #define NPY_FEATURE_VERSION_STRING "1.20"
+#elif NPY_FEATURE_VERSION == NPY_1_22_API_VERSION
+ #define NPY_FEATURE_VERSION_STRING "1.22"
+#elif NPY_FEATURE_VERSION == NPY_1_23_API_VERSION /* also 1.24 */
+ #define NPY_FEATURE_VERSION_STRING "1.23"
+#elif NPY_FEATURE_VERSION == NPY_1_25_API_VERSION
+ #define NPY_FEATURE_VERSION_STRING "1.25"
+#elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION
+ #define NPY_FEATURE_VERSION_STRING "2.0"
+#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION /* also 2.2 */
+ #define NPY_FEATURE_VERSION_STRING "2.1"
+#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION
+ #define NPY_FEATURE_VERSION_STRING "2.3"
+#else
+ #error "Missing version string define for new NumPy version."
+#endif
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/LICENSE.txt b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/LICENSE.txt
new file mode 100644
index 00000000..d72a7c38
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/LICENSE.txt
@@ -0,0 +1,21 @@
+ zlib License
+ ------------
+
+ Copyright (C) 2010 - 2019 ridiculous_fish,
+ Copyright (C) 2016 - 2019 Kim Walisch,
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/bitgen.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/bitgen.h
new file mode 100644
index 00000000..162dd5c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/bitgen.h
@@ -0,0 +1,20 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
+
+#pragma once
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Must match the declaration in numpy/random/<any>.pxd */
+
+typedef struct bitgen {
+ void *state;
+ uint64_t (*next_uint64)(void *st);
+ uint32_t (*next_uint32)(void *st);
+ double (*next_double)(void *st);
+ uint64_t (*next_raw)(void *st);
+} bitgen_t;
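+
+/*
+ * Usage sketch (illustrative): consumers draw values through the function
+ * pointers, always passing the opaque state back in:
+ *
+ *     uint64_t draw64(bitgen_t *bg)
+ *     {
+ *         return bg->next_uint64(bg->state);
+ *     }
+ */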
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/distributions.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/distributions.h
new file mode 100644
index 00000000..e7fa4bd0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/distributions.h
@@ -0,0 +1,209 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "numpy/npy_common.h"
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "numpy/npy_math.h"
+#include "numpy/random/bitgen.h"
+
+/*
+ * RAND_INT_TYPE is used to share integer generators with RandomState which
+ * used long in place of int64_t. If changing a distribution that uses
+ * RAND_INT_TYPE, then the original unmodified copy must be retained for
+ * use in RandomState by copying to the legacy distributions source file.
+ */
+#ifdef NP_RANDOM_LEGACY
+#define RAND_INT_TYPE long
+#define RAND_INT_MAX LONG_MAX
+#else
+#define RAND_INT_TYPE int64_t
+#define RAND_INT_MAX INT64_MAX
+#endif
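+
+/*
+ * Sketch (illustrative): the same distribution source can thus be compiled
+ * twice - a long-based variant for the legacy RandomState and an
+ * int64-based variant for Generator:
+ *
+ *     RAND_INT_TYPE k = RAND_INT_MAX;   // long/LONG_MAX under
+ *                                       // NP_RANDOM_LEGACY, else
+ *                                       // int64_t/INT64_MAX
+ */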
+
+#ifdef _MSC_VER
+#define DECLDIR __declspec(dllexport)
+#else
+#define DECLDIR extern
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338328
+#endif
+
+typedef struct s_binomial_t {
+ int has_binomial; /* !=0: following parameters initialized for binomial */
+ double psave;
+ RAND_INT_TYPE nsave;
+ double r;
+ double q;
+ double fm;
+ RAND_INT_TYPE m;
+ double p1;
+ double xm;
+ double xl;
+ double xr;
+ double c;
+ double laml;
+ double lamr;
+ double p2;
+ double p3;
+ double p4;
+} binomial_t;
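+
+/*
+ * Scratch-space sketch (illustrative): callers allocate one binomial_t,
+ * zero it so that has_binomial == 0, and pass the same pointer to
+ * repeated random_binomial() calls, which lets the setup for a given
+ * (n, p) pair be cached between draws:
+ *
+ *   binomial_t scratch = {0};
+ *   int64_t k = random_binomial(bg, 0.5, 20, &scratch);
+ */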
+
+DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
+DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
+DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
+DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
+DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
+DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
+
+DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
+DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
+DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
+DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
+
+DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
+
+DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
+DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
+
+DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
+DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
+DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
+DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
+DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
+DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
+DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
+DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
+DECLDIR double random_power(bitgen_t *bitgen_state, double a);
+DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
+DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
+DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
+DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc);
+DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc);
+DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
+DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
+DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right);
+
+DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
+DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
+ double p);
+
+DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial);
+
+DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p);
+DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
+DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad, int64_t sample);
+DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
+
+/* Generate random uint64 numbers in closed interval [off, off + rng]. */
+DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, uint64_t mask,
+ bool use_masked);
+
+/* Generate random uint32 numbers in closed interval [off, off + rng]. */
+DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng,
+ uint32_t mask, bool use_masked,
+ int *bcnt, uint32_t *buf);
+DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng,
+ uint16_t mask, bool use_masked,
+ int *bcnt, uint32_t *buf);
+DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, uint8_t mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf);
+DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_bool mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf);
+
+DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, npy_intp cnt,
+ bool use_masked, uint64_t *out);
+DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
+ uint32_t rng, npy_intp cnt,
+ bool use_masked, uint32_t *out);
+DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
+ uint16_t rng, npy_intp cnt,
+ bool use_masked, uint16_t *out);
+DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, npy_intp cnt,
+ bool use_masked, uint8_t *out);
+DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_intp cnt,
+ bool use_masked, npy_bool *out);
+
+DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
+ double *pix, npy_intp d, binomial_t *binomial);
+
+/* multivariate hypergeometric, "count" method */
+DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates);
+
+/* multivariate hypergeometric, "marginals" method */
+DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates);
+
+/* Common to legacy-distributions.c and distributions.c but not exported */
+
+RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+double random_loggam(double x);
+static inline double next_double(bitgen_t *bitgen_state) {
+ return bitgen_state->next_double(bitgen_state->state);
+}
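+
+/*
+ * Usage sketch (illustrative, not part of the declared API): given any
+ * initialized bitgen_t *bg (for example the one a Generator exposes via
+ * its PyCapsule), variates can be drawn directly from the functions above:
+ *
+ *   double buf[1000];
+ *   random_standard_normal_fill(bg, 1000, buf);
+ *   double g = random_gamma(bg, 3.0, 2.0);   // shape 3.0, scale 2.0
+ */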
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/libdivide.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/libdivide.h
new file mode 100644
index 00000000..f4eb8039
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/random/libdivide.h
@@ -0,0 +1,2079 @@
+// libdivide.h - Optimized integer division
+// https://libdivide.com
+//
+// Copyright (C) 2010 - 2019 ridiculous_fish,
+// Copyright (C) 2016 - 2019 Kim Walisch,
+//
+// libdivide is dual-licensed under the Boost or zlib licenses.
+// You may use libdivide under the terms of either of these.
+// See LICENSE.txt for more details.
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+
+#define LIBDIVIDE_VERSION "3.0"
+#define LIBDIVIDE_VERSION_MAJOR 3
+#define LIBDIVIDE_VERSION_MINOR 0
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+    #include <cstdlib>
+    #include <cstdio>
+    #include <type_traits>
+#else
+    #include <stdlib.h>
+    #include <stdio.h>
+#endif
+
+#if defined(LIBDIVIDE_AVX512)
+    #include <immintrin.h>
+#elif defined(LIBDIVIDE_AVX2)
+    #include <immintrin.h>
+#elif defined(LIBDIVIDE_SSE2)
+    #include <emmintrin.h>
+#endif
+
+#if defined(_MSC_VER)
+    #include <intrin.h>
+ // disable warning C4146: unary minus operator applied
+ // to unsigned type, result still unsigned
+ #pragma warning(disable: 4146)
+ #define LIBDIVIDE_VC
+#endif
+
+#if !defined(__has_builtin)
+ #define __has_builtin(x) 0
+#endif
+
+#if defined(__SIZEOF_INT128__)
+ #define HAS_INT128_T
+ // clang-cl on Windows does not yet support 128-bit division
+ #if !(defined(__clang__) && defined(LIBDIVIDE_VC))
+ #define HAS_INT128_DIV
+ #endif
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+ #define LIBDIVIDE_X86_64
+#endif
+
+#if defined(__i386__)
+ #define LIBDIVIDE_i386
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+ #define LIBDIVIDE_GCC_STYLE_ASM
+#endif
+
+#if defined(__cplusplus) || defined(LIBDIVIDE_VC)
+ #define LIBDIVIDE_FUNCTION __FUNCTION__
+#else
+ #define LIBDIVIDE_FUNCTION __func__
+#endif
+
+#define LIBDIVIDE_ERROR(msg) \
+ do { \
+ fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \
+ __LINE__, LIBDIVIDE_FUNCTION, msg); \
+ abort(); \
+ } while (0)
+
+#if defined(LIBDIVIDE_ASSERTIONS_ON)
+ #define LIBDIVIDE_ASSERT(x) \
+ do { \
+ if (!(x)) { \
+ fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \
+ __LINE__, LIBDIVIDE_FUNCTION, #x); \
+ abort(); \
+ } \
+ } while (0)
+#else
+ #define LIBDIVIDE_ASSERT(x)
+#endif
+
+#ifdef __cplusplus
+namespace libdivide {
+#endif
+
+// pack divider structs to prevent compilers from padding.
+// This reduces memory usage by up to 43% when using a large
+// array of libdivide dividers and improves performance
+// by up to 10% because of reduced memory bandwidth.
+#pragma pack(push, 1)
+
+struct libdivide_u32_t {
+ uint32_t magic;
+ uint8_t more;
+};
+
+struct libdivide_s32_t {
+ int32_t magic;
+ uint8_t more;
+};
+
+struct libdivide_u64_t {
+ uint64_t magic;
+ uint8_t more;
+};
+
+struct libdivide_s64_t {
+ int64_t magic;
+ uint8_t more;
+};
+
+struct libdivide_u32_branchfree_t {
+ uint32_t magic;
+ uint8_t more;
+};
+
+struct libdivide_s32_branchfree_t {
+ int32_t magic;
+ uint8_t more;
+};
+
+struct libdivide_u64_branchfree_t {
+ uint64_t magic;
+ uint8_t more;
+};
+
+struct libdivide_s64_branchfree_t {
+ int64_t magic;
+ uint8_t more;
+};
+
+#pragma pack(pop)
+
+// Explanation of the "more" field:
+//
+// * Bits 0-5 is the shift value (for shift path or mult path).
+// * Bit 6 is the add indicator for mult path.
+// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative
+// divisor indicator so that we can efficiently use sign extension to
+// create a bitmask with all bits set to 1 (if the divisor is negative)
+// or 0 (if the divisor is positive).
+//
+// u32: [0-4] shift value
+// [5] ignored
+// [6] add indicator
+// magic number of 0 indicates shift path
+//
+// s32: [0-4] shift value
+// [5] ignored
+// [6] add indicator
+// [7] indicates negative divisor
+// magic number of 0 indicates shift path
+//
+// u64: [0-5] shift value
+// [6] add indicator
+// magic number of 0 indicates shift path
+//
+// s64: [0-5] shift value
+// [6] add indicator
+// [7] indicates negative divisor
+// magic number of 0 indicates shift path
+//
+// In s32 and s64 modes, the magic number is negated according to whether
+// the divisor is negated. In the branchfree strategy it is never negated.
+
+enum {
+ LIBDIVIDE_32_SHIFT_MASK = 0x1F,
+ LIBDIVIDE_64_SHIFT_MASK = 0x3F,
+ LIBDIVIDE_ADD_MARKER = 0x40,
+ LIBDIVIDE_NEGATIVE_DIVISOR = 0x80
+};
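+
+// Worked example of the encoding above (illustrative): for a divider with
+// magic != 0, the routines below unpack "more" exactly like this:
+//
+//   uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;          // bits 0-4 (32-bit)
+//   int add_used  = (more & LIBDIVIDE_ADD_MARKER) != 0;      // bit 6
+//   int negative  = (more & LIBDIVIDE_NEGATIVE_DIVISOR) != 0; // bit 7 (signed)
+//
+// Signed paths read the sign bit with an arithmetic shift, (int8_t)more >> 7,
+// which yields an all-ones mask when the divisor is negative.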
+
+static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d);
+static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d);
+static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d);
+static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d);
+
+static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d);
+static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d);
+static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
+static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
+
+static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom);
+static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom);
+
+static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom);
+
+static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom);
+static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom);
+
+static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) {
+ uint64_t xl = x, yl = y;
+ uint64_t rl = xl * yl;
+ return (uint32_t)(rl >> 32);
+}
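+// Worked example (illustrative): libdivide_mullhi_u32(0x80000000u, 3) == 1,
+// since the full 64-bit product is 0x180000000 and its high word is 1.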
+
+static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) {
+ int64_t xl = x, yl = y;
+ int64_t rl = xl * yl;
+ // needs to be arithmetic shift
+ return (int32_t)(rl >> 32);
+}
+
+static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
+#if defined(LIBDIVIDE_VC) && \
+ defined(LIBDIVIDE_X86_64)
+ return __umulh(x, y);
+#elif defined(HAS_INT128_T)
+ __uint128_t xl = x, yl = y;
+ __uint128_t rl = xl * yl;
+ return (uint64_t)(rl >> 64);
+#else
+ // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
+ uint32_t mask = 0xFFFFFFFF;
+ uint32_t x0 = (uint32_t)(x & mask);
+ uint32_t x1 = (uint32_t)(x >> 32);
+ uint32_t y0 = (uint32_t)(y & mask);
+ uint32_t y1 = (uint32_t)(y >> 32);
+ uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
+ uint64_t x0y1 = x0 * (uint64_t)y1;
+ uint64_t x1y0 = x1 * (uint64_t)y0;
+ uint64_t x1y1 = x1 * (uint64_t)y1;
+ uint64_t temp = x1y0 + x0y0_hi;
+ uint64_t temp_lo = temp & mask;
+ uint64_t temp_hi = temp >> 32;
+
+ return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32);
+#endif
+}
+
+static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) {
+#if defined(LIBDIVIDE_VC) && \
+ defined(LIBDIVIDE_X86_64)
+ return __mulh(x, y);
+#elif defined(HAS_INT128_T)
+ __int128_t xl = x, yl = y;
+ __int128_t rl = xl * yl;
+ return (int64_t)(rl >> 64);
+#else
+ // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
+ uint32_t mask = 0xFFFFFFFF;
+ uint32_t x0 = (uint32_t)(x & mask);
+ uint32_t y0 = (uint32_t)(y & mask);
+ int32_t x1 = (int32_t)(x >> 32);
+ int32_t y1 = (int32_t)(y >> 32);
+ uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
+ int64_t t = x1 * (int64_t)y0 + x0y0_hi;
+ int64_t w1 = x0 * (int64_t)y1 + (t & mask);
+
+ return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32);
+#endif
+}
+
+static inline int32_t libdivide_count_leading_zeros32(uint32_t val) {
+#if defined(__GNUC__) || \
+ __has_builtin(__builtin_clz)
+ // Fast way to count leading zeros
+ return __builtin_clz(val);
+#elif defined(LIBDIVIDE_VC)
+ unsigned long result;
+ if (_BitScanReverse(&result, val)) {
+ return 31 - result;
+ }
+ return 0;
+#else
+ if (val == 0)
+ return 32;
+ int32_t result = 8;
+ uint32_t hi = 0xFFU << 24;
+ while ((val & hi) == 0) {
+ hi >>= 8;
+ result += 8;
+ }
+ while (val & hi) {
+ result -= 1;
+ hi <<= 1;
+ }
+ return result;
+#endif
+}
+
+static inline int32_t libdivide_count_leading_zeros64(uint64_t val) {
+#if defined(__GNUC__) || \
+ __has_builtin(__builtin_clzll)
+ // Fast way to count leading zeros
+ return __builtin_clzll(val);
+#elif defined(LIBDIVIDE_VC) && defined(_WIN64)
+ unsigned long result;
+ if (_BitScanReverse64(&result, val)) {
+ return 63 - result;
+ }
+ return 0;
+#else
+ uint32_t hi = val >> 32;
+ uint32_t lo = val & 0xFFFFFFFF;
+ if (hi != 0) return libdivide_count_leading_zeros32(hi);
+ return 32 + libdivide_count_leading_zeros32(lo);
+#endif
+}
+
+// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit
+// uint {v}. The result must fit in 32 bits.
+// Returns the quotient directly and the remainder in *r
+static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) {
+#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \
+ defined(LIBDIVIDE_GCC_STYLE_ASM)
+ uint32_t result;
+ __asm__("divl %[v]"
+ : "=a"(result), "=d"(*r)
+ : [v] "r"(v), "a"(u0), "d"(u1)
+ );
+ return result;
+#else
+ uint64_t n = ((uint64_t)u1 << 32) | u0;
+ uint32_t result = (uint32_t)(n / v);
+ *r = (uint32_t)(n - result * (uint64_t)v);
+ return result;
+#endif
+}
+
+// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit
+// uint {v}. The result must fit in 64 bits.
+// Returns the quotient directly and the remainder in *r
+static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) {
+#if defined(LIBDIVIDE_X86_64) && \
+ defined(LIBDIVIDE_GCC_STYLE_ASM)
+ uint64_t result;
+ __asm__("divq %[v]"
+ : "=a"(result), "=d"(*r)
+ : [v] "r"(v), "a"(u0), "d"(u1)
+ );
+ return result;
+#elif defined(HAS_INT128_T) && \
+ defined(HAS_INT128_DIV)
+ __uint128_t n = ((__uint128_t)u1 << 64) | u0;
+ uint64_t result = (uint64_t)(n / v);
+ *r = (uint64_t)(n - result * (__uint128_t)v);
+ return result;
+#else
+ // Code taken from Hacker's Delight:
+ // http://www.hackersdelight.org/HDcode/divlu.c.
+ // License permits inclusion here per:
+ // http://www.hackersdelight.org/permissions.htm
+
+ const uint64_t b = (1ULL << 32); // Number base (32 bits)
+ uint64_t un1, un0; // Norm. dividend LSD's
+ uint64_t vn1, vn0; // Norm. divisor digits
+ uint64_t q1, q0; // Quotient digits
+ uint64_t un64, un21, un10; // Dividend digit pairs
+ uint64_t rhat; // A remainder
+ int32_t s; // Shift amount for norm
+
+ // If overflow, set rem. to an impossible value,
+ // and return the largest possible quotient
+ if (u1 >= v) {
+ *r = (uint64_t) -1;
+ return (uint64_t) -1;
+ }
+
+ // count leading zeros
+ s = libdivide_count_leading_zeros64(v);
+ if (s > 0) {
+ // Normalize divisor
+ v = v << s;
+ un64 = (u1 << s) | (u0 >> (64 - s));
+ un10 = u0 << s; // Shift dividend left
+ } else {
+ // Avoid undefined behavior of (u0 >> 64).
+ // The behavior is undefined if the right operand is
+ // negative, or greater than or equal to the length
+ // in bits of the promoted left operand.
+ un64 = u1;
+ un10 = u0;
+ }
+
+ // Break divisor up into two 32-bit digits
+ vn1 = v >> 32;
+ vn0 = v & 0xFFFFFFFF;
+
+ // Break right half of dividend into two digits
+ un1 = un10 >> 32;
+ un0 = un10 & 0xFFFFFFFF;
+
+ // Compute the first quotient digit, q1
+ q1 = un64 / vn1;
+ rhat = un64 - q1 * vn1;
+
+ while (q1 >= b || q1 * vn0 > b * rhat + un1) {
+ q1 = q1 - 1;
+ rhat = rhat + vn1;
+ if (rhat >= b)
+ break;
+ }
+
+ // Multiply and subtract
+ un21 = un64 * b + un1 - q1 * v;
+
+ // Compute the second quotient digit
+ q0 = un21 / vn1;
+ rhat = un21 - q0 * vn1;
+
+ while (q0 >= b || q0 * vn0 > b * rhat + un0) {
+ q0 = q0 - 1;
+ rhat = rhat + vn1;
+ if (rhat >= b)
+ break;
+ }
+
+ *r = (un21 * b + un0 - q0 * v) >> s;
+ return q1 * b + q0;
+#endif
+}
+
+// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0)
+static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) {
+ if (signed_shift > 0) {
+ uint32_t shift = signed_shift;
+ *u1 <<= shift;
+ *u1 |= *u0 >> (64 - shift);
+ *u0 <<= shift;
+ }
+ else if (signed_shift < 0) {
+ uint32_t shift = -signed_shift;
+ *u0 >>= shift;
+ *u0 |= *u1 << (64 - shift);
+ *u1 >>= shift;
+ }
+}
+
+// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder.
+static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) {
+#if defined(HAS_INT128_T) && \
+ defined(HAS_INT128_DIV)
+ __uint128_t ufull = u_hi;
+ __uint128_t vfull = v_hi;
+ ufull = (ufull << 64) | u_lo;
+ vfull = (vfull << 64) | v_lo;
+ uint64_t res = (uint64_t)(ufull / vfull);
+ __uint128_t remainder = ufull - (vfull * res);
+ *r_lo = (uint64_t)remainder;
+ *r_hi = (uint64_t)(remainder >> 64);
+ return res;
+#else
+ // Adapted from "Unsigned Doubleword Division" in Hacker's Delight
+ // We want to compute u / v
+ typedef struct { uint64_t hi; uint64_t lo; } u128_t;
+ u128_t u = {u_hi, u_lo};
+ u128_t v = {v_hi, v_lo};
+
+ if (v.hi == 0) {
+ // divisor v is a 64 bit value, so we just need one 128/64 division
+ // Note that we are simpler than Hacker's Delight here, because we know
+ // the quotient fits in 64 bits whereas Hacker's Delight demands a full
+ // 128 bit quotient
+ *r_hi = 0;
+ return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo);
+ }
+ // Here v >= 2**64
+ // We know that v.hi != 0, so count leading zeros is OK
+ // We have 0 <= n <= 63
+ uint32_t n = libdivide_count_leading_zeros64(v.hi);
+
+ // Normalize the divisor so its MSB is 1
+ u128_t v1t = v;
+ libdivide_u128_shift(&v1t.hi, &v1t.lo, n);
+ uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64
+
+ // To ensure no overflow
+ u128_t u1 = u;
+ libdivide_u128_shift(&u1.hi, &u1.lo, -1);
+
+ // Get quotient from divide unsigned insn.
+ uint64_t rem_ignored;
+ uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored);
+
+ // Undo normalization and division of u by 2.
+ u128_t q0 = {0, q1};
+ libdivide_u128_shift(&q0.hi, &q0.lo, n);
+ libdivide_u128_shift(&q0.hi, &q0.lo, -63);
+
+ // Make q0 correct or too small by 1
+ // Equivalent to `if (q0 != 0) q0 = q0 - 1;`
+ if (q0.hi != 0 || q0.lo != 0) {
+ q0.hi -= (q0.lo == 0); // borrow
+ q0.lo -= 1;
+ }
+
+ // Now q0 is correct.
+ // Compute q0 * v as q0v
+ // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo)
+ // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) +
+ // (q0.lo * v.hi << 64) + q0.lo * v.lo)
+ // Each term is 128 bit
+ // High half of full product (upper 128 bits!) are dropped
+ u128_t q0v = {0, 0};
+ q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo);
+ q0v.lo = q0.lo*v.lo;
+
+ // Compute u - q0v as u_q0v
+ // This is the remainder
+ u128_t u_q0v = u;
+ u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow
+ u_q0v.lo -= q0v.lo;
+
+ // Check if u_q0v >= v
+ // This checks if our remainder is larger than the divisor
+ if ((u_q0v.hi > v.hi) ||
+ (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) {
+ // Increment q0
+ q0.lo += 1;
+ q0.hi += (q0.lo == 0); // carry
+
+ // Subtract v from remainder
+ u_q0v.hi -= v.hi + (u_q0v.lo < v.lo);
+ u_q0v.lo -= v.lo;
+ }
+
+ *r_hi = u_q0v.hi;
+ *r_lo = u_q0v.lo;
+
+ LIBDIVIDE_ASSERT(q0.hi == 0);
+ return q0.lo;
+#endif
+}
+
+////////// UINT32
+
+static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) {
+ if (d == 0) {
+ LIBDIVIDE_ERROR("divider must be != 0");
+ }
+
+ struct libdivide_u32_t result;
+ uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d);
+
+ // Power of 2
+ if ((d & (d - 1)) == 0) {
+ // We need to subtract 1 from the shift value in case of an unsigned
+ // branchfree divider because there is a hardcoded right shift by 1
+ // in its division algorithm. Because of this we also need to add back
+ // 1 in its recovery algorithm.
+ result.magic = 0;
+ result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
+ } else {
+ uint8_t more;
+ uint32_t rem, proposed_m;
+ proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem);
+
+ LIBDIVIDE_ASSERT(rem > 0 && rem < d);
+ const uint32_t e = d - rem;
+
+ // This power works if e < 2**floor_log_2_d.
+ if (!branchfree && (e < (1U << floor_log_2_d))) {
+ // This power works
+ more = floor_log_2_d;
+ } else {
+ // We have to use the general 33-bit algorithm. We need to compute
+ // (2**power) / d. However, we already have (2**(power-1))/d and
+ // its remainder. By doubling both, and then correcting the
+ // remainder, we can compute the larger division.
+ // don't care about overflow here - in fact, we expect it
+ proposed_m += proposed_m;
+ const uint32_t twice_rem = rem + rem;
+ if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+ }
+ result.magic = 1 + proposed_m;
+ result.more = more;
+ // result.more's shift should in general be ceil_log_2_d. But if we
+ // used the smaller power, we subtract one from the shift because we're
+ // using the smaller power. If we're using the larger power, we
+ // subtract one from the shift because it's taken care of by the add
+ // indicator. So floor_log_2_d happens to be correct in both cases.
+ }
+ return result;
+}
+
+struct libdivide_u32_t libdivide_u32_gen(uint32_t d) {
+ return libdivide_internal_u32_gen(d, 0);
+}
+
+struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) {
+ if (d == 1) {
+ LIBDIVIDE_ERROR("branchfree divider must be != 1");
+ }
+ struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1);
+ struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)};
+ return ret;
+}
+
+uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return numer >> more;
+ }
+ else {
+ uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ uint32_t t = ((numer - q) >> 1) + q;
+ return t >> (more & LIBDIVIDE_32_SHIFT_MASK);
+ }
+ else {
+ // All upper bits are 0,
+ // don't need to mask them off.
+ return q >> more;
+ }
+ }
+}
+
+uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
+ uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+ uint32_t t = ((numer - q) >> 1) + q;
+ return t >> denom->more;
+}
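+
+// Usage sketch (illustrative): the intended pattern is to pay for
+// libdivide_u32_gen() once and reuse the divider across many divisions by
+// the same denominator:
+//
+//   struct libdivide_u32_t d = libdivide_u32_gen(7);
+//   for (size_t i = 0; i < n; i++)
+//       out[i] = libdivide_u32_do(in[i], &d);   // same result as in[i] / 7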
+
+uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+ if (!denom->magic) {
+ return 1U << shift;
+ } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+ // We compute q = n/d = n*m / 2^(32 + shift)
+ // Therefore we have d = 2^(32 + shift) / m
+ // We need to ceil it.
+ // We know d is not a power of 2, so m is not a power of 2,
+ // so we can just add 1 to the floor
+ uint32_t hi_dividend = 1U << shift;
+ uint32_t rem_ignored;
+ return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored);
+ } else {
+ // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+ // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+ // Also note that shift may be as high as 31, so shift + 1 will
+ // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+ // then double the quotient and remainder.
+ uint64_t half_n = 1ULL << (32 + shift);
+ uint64_t d = (1ULL << 32) | denom->magic;
+ // Note that the quotient is guaranteed <= 32 bits, but the remainder
+ // may need 33!
+ uint32_t half_q = (uint32_t)(half_n / d);
+ uint64_t rem = half_n % d;
+ // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+ uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+ // We rounded down in gen (hence +1)
+ return full_q + 1;
+ }
+}
+
+uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+ if (!denom->magic) {
+ return 1U << (shift + 1);
+ } else {
+ // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+ // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+ // Also note that shift may be as high as 31, so shift + 1 will
+ // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+ // then double the quotient and remainder.
+ uint64_t half_n = 1ULL << (32 + shift);
+ uint64_t d = (1ULL << 32) | denom->magic;
+ // Note that the quotient is guaranteed <= 32 bits, but the remainder
+ // may need 33!
+ uint32_t half_q = (uint32_t)(half_n / d);
+ uint64_t rem = half_n % d;
+ // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+ uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+ // We rounded down in gen (hence +1)
+ return full_q + 1;
+ }
+}
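+
+// Round-trip sketch (illustrative): recover() inverts gen(), which makes it
+// easy to assert that a stored {magic, more} pair still encodes the divisor
+// you expect:
+//
+//   struct libdivide_u32_t d = libdivide_u32_gen(1000000007u);
+//   assert(libdivide_u32_recover(&d) == 1000000007u);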
+
+/////////// UINT64
+
+static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) {
+ if (d == 0) {
+ LIBDIVIDE_ERROR("divider must be != 0");
+ }
+
+ struct libdivide_u64_t result;
+ uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d);
+
+ // Power of 2
+ if ((d & (d - 1)) == 0) {
+ // We need to subtract 1 from the shift value in case of an unsigned
+ // branchfree divider because there is a hardcoded right shift by 1
+ // in its division algorithm. Because of this we also need to add back
+ // 1 in its recovery algorithm.
+ result.magic = 0;
+ result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
+ } else {
+ uint64_t proposed_m, rem;
+ uint8_t more;
+ // (1 << (64 + floor_log_2_d)) / d
+ proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem);
+
+ LIBDIVIDE_ASSERT(rem > 0 && rem < d);
+ const uint64_t e = d - rem;
+
+ // This power works if e < 2**floor_log_2_d.
+ if (!branchfree && e < (1ULL << floor_log_2_d)) {
+ // This power works
+ more = floor_log_2_d;
+ } else {
+ // We have to use the general 65-bit algorithm. We need to compute
+ // (2**power) / d. However, we already have (2**(power-1))/d and
+ // its remainder. By doubling both, and then correcting the
+ // remainder, we can compute the larger division.
+ // don't care about overflow here - in fact, we expect it
+ proposed_m += proposed_m;
+ const uint64_t twice_rem = rem + rem;
+ if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+ }
+ result.magic = 1 + proposed_m;
+ result.more = more;
+ // result.more's shift should in general be ceil_log_2_d. But if we
+ // used the smaller power, we subtract one from the shift because we're
+ // using the smaller power. If we're using the larger power, we
+ // subtract one from the shift because it's taken care of by the add
+ // indicator. So floor_log_2_d happens to be correct in both cases,
+ // which is why we do it outside of the if statement.
+ }
+ return result;
+}
+
+struct libdivide_u64_t libdivide_u64_gen(uint64_t d) {
+ return libdivide_internal_u64_gen(d, 0);
+}
+
+struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) {
+ if (d == 1) {
+ LIBDIVIDE_ERROR("branchfree divider must be != 1");
+ }
+ struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1);
+ struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)};
+ return ret;
+}
+
+uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return numer >> more;
+ }
+ else {
+ uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ uint64_t t = ((numer - q) >> 1) + q;
+ return t >> (more & LIBDIVIDE_64_SHIFT_MASK);
+ }
+ else {
+ // All upper bits are 0,
+ // don't need to mask them off.
+ return q >> more;
+ }
+ }
+}
+
+uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) {
+ uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
+ uint64_t t = ((numer - q) >> 1) + q;
+ return t >> denom->more;
+}
+
+uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+ if (!denom->magic) {
+ return 1ULL << shift;
+ } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+ // We compute q = n/d = n*m / 2^(64 + shift)
+ // Therefore we have d = 2^(64 + shift) / m
+ // We need to ceil it.
+ // We know d is not a power of 2, so m is not a power of 2,
+ // so we can just add 1 to the floor
+ uint64_t hi_dividend = 1ULL << shift;
+ uint64_t rem_ignored;
+ return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored);
+ } else {
+ // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
+ // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
+ // libdivide_u32_recover for more on what we do here.
+ // TODO: do something better than 128 bit math
+
+ // Full n is a (potentially) 129 bit value
+ // half_n is a 128 bit value
+ // Compute the hi half of half_n. Low half is 0.
+ uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
+ // d is a 65 bit value. The high bit is always set to 1.
+ const uint64_t d_hi = 1, d_lo = denom->magic;
+ // Note that the quotient is guaranteed <= 64 bits,
+ // but the remainder may need 65!
+ uint64_t r_hi, r_lo;
+ uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
+ // We computed 2^(64+shift)/(m+2^64)
+ // Double the remainder ('dr') and check if that is larger than d
+        // Note that d is a 65 bit value, so the remainder is small and
+        // doubling it cannot overflow
+ uint64_t dr_lo = r_lo + r_lo;
+ uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
+ int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
+ uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
+ return full_q + 1;
+ }
+}
+
+uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+ if (!denom->magic) {
+ return 1ULL << (shift + 1);
+ } else {
+ // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
+ // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
+ // libdivide_u32_recover for more on what we do here.
+ // TODO: do something better than 128 bit math
+
+ // Full n is a (potentially) 129 bit value
+ // half_n is a 128 bit value
+ // Compute the hi half of half_n. Low half is 0.
+ uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
+ // d is a 65 bit value. The high bit is always set to 1.
+ const uint64_t d_hi = 1, d_lo = denom->magic;
+ // Note that the quotient is guaranteed <= 64 bits,
+ // but the remainder may need 65!
+ uint64_t r_hi, r_lo;
+ uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
+ // We computed 2^(64+shift)/(m+2^64)
+ // Double the remainder ('dr') and check if that is larger than d
+        // Note that d is a 65 bit value, so the remainder is small and
+        // doubling it cannot overflow
+ uint64_t dr_lo = r_lo + r_lo;
+ uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
+ int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
+ uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
+ return full_q + 1;
+ }
+}
+
+/////////// SINT32
+
+static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) {
+ if (d == 0) {
+ LIBDIVIDE_ERROR("divider must be != 0");
+ }
+
+ struct libdivide_s32_t result;
+
+    // If d is a power of 2, or the negative of a power of 2, we have to use a shift.
+ // This is especially important because the magic algorithm fails for -1.
+ // To check if d is a power of 2 or its inverse, it suffices to check
+ // whether its absolute value has exactly one bit set. This works even for
+ // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+ // and is a power of 2.
+ uint32_t ud = (uint32_t)d;
+ uint32_t absD = (d < 0) ? -ud : ud;
+ uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD);
+ // check if exactly one bit is set,
+ // don't care if absD is 0 since that's divide by zero
+ if ((absD & (absD - 1)) == 0) {
+ // Branchfree and normal paths are exactly the same
+ result.magic = 0;
+ result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+ } else {
+ LIBDIVIDE_ASSERT(floor_log_2_d >= 1);
+
+ uint8_t more;
+        // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+ uint32_t rem, proposed_m;
+ proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
+ const uint32_t e = absD - rem;
+
+ // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+ if (!branchfree && e < (1U << floor_log_2_d)) {
+ // This power works
+ more = floor_log_2_d - 1;
+ } else {
+ // We need to go one higher. This should not make proposed_m
+ // overflow, but it will make it negative when interpreted as an
+ // int32_t.
+ proposed_m += proposed_m;
+ const uint32_t twice_rem = rem + rem;
+ if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+ }
+
+ proposed_m += 1;
+ int32_t magic = (int32_t)proposed_m;
+
+ // Mark if we are negative. Note we only negate the magic number in the
+ // branchfull case.
+ if (d < 0) {
+ more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+ if (!branchfree) {
+ magic = -magic;
+ }
+ }
+
+ result.more = more;
+ result.magic = magic;
+ }
+ return result;
+}
+
+struct libdivide_s32_t libdivide_s32_gen(int32_t d) {
+ return libdivide_internal_s32_gen(d, 0);
+}
+
+struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
+ struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1);
+ struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more};
+ return result;
+}
+
+int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+ if (!denom->magic) {
+ uint32_t sign = (int8_t)more >> 7;
+ uint32_t mask = (1U << shift) - 1;
+ uint32_t uq = numer + ((numer >> 31) & mask);
+ int32_t q = (int32_t)uq;
+ q >>= shift;
+ q = (q ^ sign) - sign;
+ return q;
+ } else {
+ uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift and then sign extend
+ int32_t sign = (int8_t)more >> 7;
+ // q += (more < 0 ? -numer : numer)
+ // cast required to avoid UB
+ uq += ((uint32_t)numer ^ sign) - sign;
+ }
+ int32_t q = (int32_t)uq;
+ q >>= shift;
+ q += (q < 0);
+ return q;
+ }
+}
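+
+// Signed behavior sketch (illustrative): like C's / operator, libdivide
+// truncates toward zero, including with negative divisors:
+//
+//   struct libdivide_s32_t d = libdivide_s32_gen(-3);
+//   int32_t q = libdivide_s32_do(7, &d);   // -2, same as 7 / -3 in C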
+
+int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ // must be arithmetic shift and then sign extend
+ int32_t sign = (int8_t)more >> 7;
+ int32_t magic = denom->magic;
+ int32_t q = libdivide_mullhi_s32(magic, numer);
+ q += numer;
+
+ // If q is non-negative, we have nothing to do
+ // If q is negative, we want to add either (2**shift)-1 if d is a power of
+ // 2, or (2**shift) if it is not a power of 2
+ uint32_t is_power_of_2 = (magic == 0);
+ uint32_t q_sign = (uint32_t)(q >> 31);
+ q += q_sign & ((1U << shift) - is_power_of_2);
+
+ // Now arithmetic right shift
+ q >>= shift;
+ // Negate if needed
+ q = (q ^ sign) - sign;
+
+ return q;
+}
+
+int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ if (!denom->magic) {
+ uint32_t absD = 1U << shift;
+ if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
+ absD = -absD;
+ }
+ return (int32_t)absD;
+ } else {
+ // Unsigned math is much easier
+ // We negate the magic number only in the branchfull case, and we don't
+ // know which case we're in. However we have enough information to
+ // determine the correct sign of the magic number. The divisor was
+ // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set,
+ // the magic number's sign is opposite that of the divisor.
+ // We want to compute the positive magic number.
+ int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
+ int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
+ ? denom->magic > 0 : denom->magic < 0;
+
+ // Handle the power of 2 case (including branchfree)
+ if (denom->magic == 0) {
+ int32_t result = 1U << shift;
+ return negative_divisor ? -result : result;
+ }
+
+ uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic);
+ uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30
+ uint32_t q = (uint32_t)(n / d);
+ int32_t result = (int32_t)q;
+ result += 1;
+ return negative_divisor ? -result : result;
+ }
+}
+
+int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) {
+ return libdivide_s32_recover((const struct libdivide_s32_t *)denom);
+}
+
+///////////// SINT64
+
+static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) {
+ if (d == 0) {
+ LIBDIVIDE_ERROR("divider must be != 0");
+ }
+
+ struct libdivide_s64_t result;
+
+    // If d is a power of 2, or the negative of a power of 2, we have to use a shift.
+ // This is especially important because the magic algorithm fails for -1.
+ // To check if d is a power of 2 or its inverse, it suffices to check
+ // whether its absolute value has exactly one bit set. This works even for
+ // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+ // and is a power of 2.
+ uint64_t ud = (uint64_t)d;
+ uint64_t absD = (d < 0) ? -ud : ud;
+ uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD);
+ // check if exactly one bit is set,
+ // don't care if absD is 0 since that's divide by zero
+ if ((absD & (absD - 1)) == 0) {
+ // Branchfree and non-branchfree cases are the same
+ result.magic = 0;
+ result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+ } else {
+        // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+ uint8_t more;
+ uint64_t rem, proposed_m;
+ proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
+ const uint64_t e = absD - rem;
+
+ // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+ if (!branchfree && e < (1ULL << floor_log_2_d)) {
+ // This power works
+ more = floor_log_2_d - 1;
+ } else {
+ // We need to go one higher. This should not make proposed_m
+ // overflow, but it will make it negative when interpreted as an
+ // int32_t.
+ proposed_m += proposed_m;
+ const uint64_t twice_rem = rem + rem;
+ if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+            // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we
+            // also set ADD_MARKER; this is an annoying optimization that
+            // enables algorithm #4 to avoid the mask. However, we always set
+            // it in the branchfree case.
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+ }
+ proposed_m += 1;
+ int64_t magic = (int64_t)proposed_m;
+
+ // Mark if we are negative
+ if (d < 0) {
+ more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+ if (!branchfree) {
+ magic = -magic;
+ }
+ }
+
+ result.more = more;
+ result.magic = magic;
+ }
+ return result;
+}
+
+struct libdivide_s64_t libdivide_s64_gen(int64_t d) {
+ return libdivide_internal_s64_gen(d, 0);
+}
+
+struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
+ struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1);
+ struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more};
+ return ret;
+}
+
+int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+ if (!denom->magic) { // shift path
+ uint64_t mask = (1ULL << shift) - 1;
+ uint64_t uq = numer + ((numer >> 63) & mask);
+ int64_t q = (int64_t)uq;
+ q >>= shift;
+ // must be arithmetic shift and then sign-extend
+ int64_t sign = (int8_t)more >> 7;
+ q = (q ^ sign) - sign;
+ return q;
+ } else {
+ uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift and then sign extend
+ int64_t sign = (int8_t)more >> 7;
+ // q += (more < 0 ? -numer : numer)
+ // cast required to avoid UB
+ uq += ((uint64_t)numer ^ sign) - sign;
+ }
+ int64_t q = (int64_t)uq;
+ q >>= shift;
+ q += (q < 0);
+ return q;
+ }
+}
+
+int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ // must be arithmetic shift and then sign extend
+ int64_t sign = (int8_t)more >> 7;
+ int64_t magic = denom->magic;
+ int64_t q = libdivide_mullhi_s64(magic, numer);
+ q += numer;
+
+ // If q is non-negative, we have nothing to do.
+ // If q is negative, we want to add either (2**shift)-1 if d is a power of
+ // 2, or (2**shift) if it is not a power of 2.
+ uint64_t is_power_of_2 = (magic == 0);
+ uint64_t q_sign = (uint64_t)(q >> 63);
+ q += q_sign & ((1ULL << shift) - is_power_of_2);
+
+ // Arithmetic right shift
+ q >>= shift;
+ // Negate if needed
+ q = (q ^ sign) - sign;
+
+ return q;
+}
+
+int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ if (denom->magic == 0) { // shift path
+ uint64_t absD = 1ULL << shift;
+ if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
+ absD = -absD;
+ }
+ return (int64_t)absD;
+ } else {
+ // Unsigned math is much easier
+ int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
+ int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
+ ? denom->magic > 0 : denom->magic < 0;
+
+ uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic);
+ uint64_t n_hi = 1ULL << shift, n_lo = 0;
+ uint64_t rem_ignored;
+ uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored);
+ int64_t result = (int64_t)(q + 1);
+ if (negative_divisor) {
+ result = -result;
+ }
+ return result;
+ }
+}
+
+int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) {
+ return libdivide_s64_recover((const struct libdivide_s64_t *)denom);
+}
+
+#if defined(LIBDIVIDE_AVX512)
+
+static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom);
+static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom);
+static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom);
+static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom);
+
+static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+static inline __m512i libdivide_s64_signbits(__m512i v) {
+ return _mm512_srai_epi64(v, 63);
+}
+
+static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) {
+ return _mm512_srai_epi64(v, amt);
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) {
+ __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32);
+ __m512i a1X3X = _mm512_srli_epi64(a, 32);
+ __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+ __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask);
+ return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// b is one 32-bit value repeated.
+static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) {
+ __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32);
+ __m512i a1X3X = _mm512_srli_epi64(a, 32);
+ __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+ __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask);
+ return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) {
+ __m512i lomask = _mm512_set1_epi64(0xffffffff);
+ __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1);
+ __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1);
+ __m512i w0 = _mm512_mul_epu32(x, y);
+ __m512i w1 = _mm512_mul_epu32(x, yh);
+ __m512i w2 = _mm512_mul_epu32(xh, y);
+ __m512i w3 = _mm512_mul_epu32(xh, yh);
+ __m512i w0h = _mm512_srli_epi64(w0, 32);
+ __m512i s1 = _mm512_add_epi64(w1, w0h);
+ __m512i s1l = _mm512_and_si512(s1, lomask);
+ __m512i s1h = _mm512_srli_epi64(s1, 32);
+ __m512i s2 = _mm512_add_epi64(w2, s1l);
+ __m512i s2h = _mm512_srli_epi64(s2, 32);
+ __m512i hi = _mm512_add_epi64(w3, s1h);
+ hi = _mm512_add_epi64(hi, s2h);
+
+ return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) {
+ __m512i p = libdivide_mullhi_u64_vector(x, y);
+ __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y);
+ __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x);
+ p = _mm512_sub_epi64(p, t1);
+ p = _mm512_sub_epi64(p, t2);
+ return p;
+}
+
+////////// UINT32
+
+__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm512_srli_epi32(numers, more);
+ }
+ else {
+ __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+ return _mm512_srli_epi32(t, shift);
+ }
+ else {
+ return _mm512_srli_epi32(q, more);
+ }
+ }
+}
+
+__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) {
+ __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+ __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+ return _mm512_srli_epi32(t, denom->more);
+}
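+
+// Vector usage sketch (illustrative, AVX512 path): sixteen uint32 lanes
+// divided by one shared divisor per call:
+//
+//   struct libdivide_u32_t d = libdivide_u32_gen(9);
+//   __m512i v = _mm512_loadu_si512((const void *)src);
+//   __m512i q = libdivide_u32_do_vector(v, &d);
+//   _mm512_storeu_si512((void *)dst, q);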
+
+////////// UINT64
+
+__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm512_srli_epi64(numers, more);
+ }
+ else {
+ __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+ return _mm512_srli_epi64(t, shift);
+ }
+ else {
+ return _mm512_srli_epi64(q, more);
+ }
+ }
+}
+
+__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) {
+ __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+ __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+ return _mm512_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ uint32_t mask = (1U << shift) - 1;
+ __m512i roundToZeroTweak = _mm512_set1_epi32(mask);
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
+ __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak));
+ q = _mm512_srai_epi32(q, shift);
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign);
+ return q;
+ }
+ else {
+ __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign));
+ }
+ // q >>= shift
+ q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+ q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) {
+ int32_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ // must be arithmetic shift
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic));
+ q = _mm512_add_epi32(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2
+ uint32_t is_power_of_2 = (magic == 0);
+ __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31
+ __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2);
+ q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
+ q = _mm512_srai_epi32(q, shift); // q >>= shift
+ q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+////////// SINT64
+
+__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ int64_t magic = denom->magic;
+ if (magic == 0) { // shift path
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ uint64_t mask = (1ULL << shift) - 1;
+ __m512i roundToZeroTweak = _mm512_set1_epi64(mask);
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
+ __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak));
+ q = libdivide_s64_shift_right_vector(q, shift);
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign);
+ return q;
+ }
+ else {
+ __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign));
+ }
+ // q >>= denom->mult_path.shift
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+ q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) {
+ int64_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ // must be arithmetic shift
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+
+ // libdivide_mullhi_s64(numers, magic);
+ __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
+ q = _mm512_add_epi64(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do.
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2.
+ uint32_t is_power_of_2 = (magic == 0);
+ __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+ __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2);
+ q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+ q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+#elif defined(LIBDIVIDE_AVX2)
+
+static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom);
+static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom);
+static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom);
+static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom);
+
+static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+// Implementation of _mm256_srai_epi64(v, 63) (from AVX512).
+static inline __m256i libdivide_s64_signbits(__m256i v) {
+ __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+ __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31);
+ return signBits;
+}
+
+// Implementation of _mm256_srai_epi64 (from AVX512).
+static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) {
+ const int b = 64 - amt;
+ __m256i m = _mm256_set1_epi64x(1ULL << (b - 1));
+ __m256i x = _mm256_srli_epi64(v, amt);
+ __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m);
+ return result;
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) {
+ __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32);
+ __m256i a1X3X = _mm256_srli_epi64(a, 32);
+ __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
+ __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask);
+ return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
+}
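+// (_mm256_mul_epu32 only multiplies the even-indexed 32-bit lanes. The code
+// above therefore forms the even products directly, where the 64-bit shift
+// drops their high halves into the even lanes, forms the odd products from
+// `a` shifted down by 32, and masks/ORs the two sets of high halves back
+// together.)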
+
+// b is one 32-bit value repeated.
+static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) {
+ __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32);
+ __m256i a1X3X = _mm256_srli_epi64(a, 32);
+ __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
+ __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask);
+ return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) {
+ __m256i lomask = _mm256_set1_epi64x(0xffffffff);
+ __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h
+ __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h
+ __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l
+ __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h
+ __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y1l
+ __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h
+ __m256i w0h = _mm256_srli_epi64(w0, 32);
+ __m256i s1 = _mm256_add_epi64(w1, w0h);
+ __m256i s1l = _mm256_and_si256(s1, lomask);
+ __m256i s1h = _mm256_srli_epi64(s1, 32);
+ __m256i s2 = _mm256_add_epi64(w2, s1l);
+ __m256i s2h = _mm256_srli_epi64(s2, 32);
+ __m256i hi = _mm256_add_epi64(w3, s1h);
+ hi = _mm256_add_epi64(hi, s2h);
+
+ return hi;
+}
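+
+// Scalar reference for the splitting above (an illustrative sketch, not part
+// of the upstream header). With x = xh*2^32 + xl and y = yh*2^32 + yl:
+//
+// uint64_t mullhi_u64(uint64_t x, uint64_t y) {
+// uint64_t xl = x & 0xffffffff, xh = x >> 32;
+// uint64_t yl = y & 0xffffffff, yh = y >> 32;
+// uint64_t w0 = xl * yl; // low product
+// uint64_t s1 = xl * yh + (w0 >> 32); // cannot overflow 64 bits
+// uint64_t s2 = xh * yl + (s1 & 0xffffffff);
+// return xh * yh + (s1 >> 32) + (s2 >> 32);
+// }
+//
+// This is exactly the w0..w3 / s1 / s2 dataflow the vector code uses.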
+
+// y is one 64-bit value repeated.
+static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) {
+ __m256i p = libdivide_mullhi_u64_vector(x, y);
+ __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y);
+ __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x);
+ p = _mm256_sub_epi64(p, t1);
+ p = _mm256_sub_epi64(p, t2);
+ return p;
+}
+
+////////// UINT32
+
+__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm256_srli_epi32(numers, more);
+ }
+ else {
+ __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
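+ // (t is (numer + q) / 2 computed without overflowing the 32-bit
+ // lane; numer + q itself may not fit in 32 bits.)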
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+ return _mm256_srli_epi32(t, shift);
+ }
+ else {
+ return _mm256_srli_epi32(q, more);
+ }
+ }
+}
+
+__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) {
+ __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+ __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+ return _mm256_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm256_srli_epi64(numers, more);
+ }
+ else {
+ __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+ return _mm256_srli_epi64(t, shift);
+ }
+ else {
+ return _mm256_srli_epi64(q, more);
+ }
+ }
+}
+
+__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) {
+ __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+ __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+ return _mm256_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ uint32_t mask = (1U << shift) - 1;
+ __m256i roundToZeroTweak = _mm256_set1_epi32(mask);
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
+ __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak));
+ q = _mm256_srai_epi32(q, shift);
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign);
+ return q;
+ }
+ else {
+ __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign));
+ }
+ // q >>= shift
+ q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+ q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) {
+ int32_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ // must be arithmetic shift
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic));
+ q = _mm256_add_epi32(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2
+ uint32_t is_power_of_2 = (magic == 0);
+ __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31
+ __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2);
+ q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
+ q = _mm256_srai_epi32(q, shift); // q >>= shift
+ q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+////////// SINT64
+
+__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ int64_t magic = denom->magic;
+ if (magic == 0) { // shift path
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ uint64_t mask = (1ULL << shift) - 1;
+ __m256i roundToZeroTweak = _mm256_set1_epi64x(mask);
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
+ __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak));
+ q = libdivide_s64_shift_right_vector(q, shift);
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign);
+ return q;
+ }
+ else {
+ __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign));
+ }
+ // q >>= shift
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+ q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) {
+ int64_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ // must be arithmetic shift
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+
+ // libdivide_mullhi_s64(numers, magic);
+ __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
+ q = _mm256_add_epi64(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do.
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2.
+ uint32_t is_power_of_2 = (magic == 0);
+ __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+ __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2);
+ q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+ q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+#elif defined(LIBDIVIDE_SSE2)
+
+static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom);
+static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom);
+static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom);
+static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom);
+
+static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+// Implementation of _mm_srai_epi64(v, 63) (from AVX512).
+static inline __m128i libdivide_s64_signbits(__m128i v) {
+ __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+ __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31);
+ return signBits;
+}
+
+// Implementation of _mm_srai_epi64 (from AVX512).
+static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) {
+ const int b = 64 - amt;
+ __m128i m = _mm_set1_epi64x(1ULL << (b - 1));
+ __m128i x = _mm_srli_epi64(v, amt);
+ __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m);
+ return result;
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) {
+ __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32);
+ __m128i a1X3X = _mm_srli_epi64(a, 32);
+ __m128i mask = _mm_set_epi32(-1, 0, -1, 0);
+ __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask);
+ return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// SSE2 has no signed high-multiply instruction, but the signed high half can
+// be recovered from the unsigned product with two sign corrections. Again, b
+// is just a 32-bit value repeated four times.
+static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) {
+ __m128i p = libdivide_mullhi_u32_vector(a, b);
+ // t1 = (a >> 31) & y, arithmetic shift
+ __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b);
+ __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a);
+ p = _mm_sub_epi32(p, t1);
+ p = _mm_sub_epi32(p, t2);
+ return p;
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) {
+ __m128i lomask = _mm_set1_epi64x(0xffffffff);
+ __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h
+ __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h
+ __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l
+ __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h
+ __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y1l
+ __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h
+ __m128i w0h = _mm_srli_epi64(w0, 32);
+ __m128i s1 = _mm_add_epi64(w1, w0h);
+ __m128i s1l = _mm_and_si128(s1, lomask);
+ __m128i s1h = _mm_srli_epi64(s1, 32);
+ __m128i s2 = _mm_add_epi64(w2, s1l);
+ __m128i s2h = _mm_srli_epi64(s2, 32);
+ __m128i hi = _mm_add_epi64(w3, s1h);
+ hi = _mm_add_epi64(hi, s2h);
+
+ return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) {
+ __m128i p = libdivide_mullhi_u64_vector(x, y);
+ __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y);
+ __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x);
+ p = _mm_sub_epi64(p, t1);
+ p = _mm_sub_epi64(p, t2);
+ return p;
+}
+
+////////// UINT32
+
+__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm_srli_epi32(numers, more);
+ }
+ else {
+ __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+ return _mm_srli_epi32(t, shift);
+ }
+ else {
+ return _mm_srli_epi32(q, more);
+ }
+ }
+}
+
+__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) {
+ __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+ __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+ return _mm_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm_srli_epi64(numers, more);
+ }
+ else {
+ __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+ return _mm_srli_epi64(t, shift);
+ }
+ else {
+ return _mm_srli_epi64(q, more);
+ }
+ }
+}
+
+__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) {
+ __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+ __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+ return _mm_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ uint32_t mask = (1U << shift) - 1;
+ __m128i roundToZeroTweak = _mm_set1_epi32(mask);
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
+ __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
+ q = _mm_srai_epi32(q, shift);
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign);
+ return q;
+ }
+ else {
+ __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign));
+ }
+ // q >>= shift
+ q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+ q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) {
+ int32_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ // must be arithmetic shift
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic));
+ q = _mm_add_epi32(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2
+ uint32_t is_power_of_2 = (magic == 0);
+ __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31
+ __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2);
+ q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+ q = _mm_srai_epi32(q, shift); // q >>= shift
+ q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+////////// SINT64
+
+__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ int64_t magic = denom->magic;
+ if (magic == 0) { // shift path
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ uint64_t mask = (1ULL << shift) - 1;
+ __m128i roundToZeroTweak = _mm_set1_epi64x(mask);
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
+ __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak));
+ q = libdivide_s64_shift_right_vector(q, shift);
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign);
+ return q;
+ }
+ else {
+ __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign));
+ }
+ // q >>= shift
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+ q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) {
+ int64_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ // must be arithmetic shift
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+
+ // libdivide_mullhi_s64(numers, magic);
+ __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
+ q = _mm_add_epi64(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do.
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2.
+ uint32_t is_power_of_2 = (magic == 0);
+ __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+ __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
+ q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+ q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+#endif
+
+/////////// C++ stuff
+
+#ifdef __cplusplus
+
+// The C++ divider class is templated on both an integer type
+// (like uint64_t) and an algorithm type.
+// * BRANCHFULL is the default algorithm type.
+// * BRANCHFREE is the branchfree algorithm type.
+enum {
+ BRANCHFULL,
+ BRANCHFREE
+};
+
+#if defined(LIBDIVIDE_AVX512)
+ #define LIBDIVIDE_VECTOR_TYPE __m512i
+#elif defined(LIBDIVIDE_AVX2)
+ #define LIBDIVIDE_VECTOR_TYPE __m256i
+#elif defined(LIBDIVIDE_SSE2)
+ #define LIBDIVIDE_VECTOR_TYPE __m128i
+#endif
+
+#if !defined(LIBDIVIDE_VECTOR_TYPE)
+ #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+#else
+ #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+ LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+ return libdivide_##ALGO##_do_vector(n, &denom); \
+ }
+#endif
+
+// The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+// and algorithm types) that redirect to libdivide's C API.
+#define DISPATCHER_GEN(T, ALGO) \
+ libdivide_##ALGO##_t denom; \
+ dispatcher() { } \
+ dispatcher(T d) \
+ : denom(libdivide_##ALGO##_gen(d)) \
+ { } \
+ T divide(T n) const { \
+ return libdivide_##ALGO##_do(n, &denom); \
+ } \
+ LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+ T recover() const { \
+ return libdivide_##ALGO##_recover(&denom); \
+ }
+
+// The dispatcher selects a specific division algorithm for a given
+// type and ALGO using partial template specialization.
+template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+// This is the main divider class for use by the user (C++ API).
+// The actual division algorithm is selected using the dispatcher struct
+// based on the integer and algorithm template parameters.
+template<typename T, int ALGO = BRANCHFULL>
+class divider {
+public:
+ // We leave the default constructor empty so that creating
+ // an array of dividers and then initializing them
+ // later doesn't slow us down.
+ divider() { }
+
+ // Constructor that takes the divisor as a parameter
+ divider(T d) : div(d) { }
+
+ // Divides n by the divisor
+ T divide(T n) const {
+ return div.divide(n);
+ }
+
+ // Recovers the divisor, returns the value that was
+ // used to initialize this divider object.
+ T recover() const {
+ return div.recover();
+ }
+
+ bool operator==(const divider<T, ALGO>& other) const {
+ return div.denom.magic == other.div.denom.magic &&
+ div.denom.more == other.div.denom.more;
+ }
+
+ bool operator!=(const divider<T, ALGO>& other) const {
+ return !(*this == other);
+ }
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+ // Treats the vector as packed integer values with the same type as
+ // the divider (e.g. s32, u32, s64, u64) and divides each of
+ // them by the divider, returning the packed quotients.
+ LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+ return div.divide(n);
+ }
+#endif
+
+private:
+ // Storage for the actual divisor
+ dispatcher<std::is_integral<T>::value,
+ std::is_signed<T>::value, sizeof(T), ALGO> div;
+};
+
+// Overload of operator / for scalar division
+template<typename T, int ALGO>
+T operator/(T n, const divider<T, ALGO>& div) {
+ return div.divide(n);
+}
+
+// Overload of operator /= for scalar division
+template<typename T, int ALGO>
+T& operator/=(T& n, const divider<T, ALGO>& div) {
+ n = div.divide(n);
+ return n;
+}
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+ // Overload of operator / for vector division
+ template<typename T, int ALGO>
+ LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+ return div.divide(n);
+ }
+ // Overload of operator /= for vector division
+ template<typename T, int ALGO>
+ LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+ n = div.divide(n);
+ return n;
+ }
+#endif
+
+// libdivide::branchfree_divider
+template <typename T>
+using branchfree_divider = divider<T, BRANCHFREE>;
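+
+// Usage sketch (illustrative only, not part of the upstream header):
+//
+// libdivide::divider<uint64_t> d(7); // precompute magic and shift once
+// uint64_t q = n / d; // scalar operator/ above
+// __m256i qv = nv / d; // vector overload (with LIBDIVIDE_AVX2)
+// assert(d.recover() == 7);
+//
+// branchfree_divider<uint64_t> emits a fixed straight-line sequence instead
+// of the data-dependent branch taken by the BRANCHFULL dividers.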
+
+} // namespace libdivide
+
+#endif // __cplusplus
+
+#endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ufuncobject.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ufuncobject.h
new file mode 100644
index 00000000..f5f82b57
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/ufuncobject.h
@@ -0,0 +1,343 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+
+#include <numpy/npy_math.h>
+#include <numpy/npy_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The legacy generic inner loop for a standard element-wise or
+ * generalized ufunc.
+ */
+typedef void (*PyUFuncGenericFunction)
+ (char **args,
+ npy_intp const *dimensions,
+ npy_intp const *strides,
+ void *innerloopdata);
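+
+/*
+ * Illustrative sketch (not part of this header): an inner loop for a unary
+ * double -> double ufunc typically walks the strided arguments like this,
+ * where dimensions[0] is the element count and strides are in bytes:
+ *
+ * static void
+ * double_negate(char **args, npy_intp const *dimensions,
+ * npy_intp const *strides, void *NPY_UNUSED(data))
+ * {
+ * npy_intp i, n = dimensions[0];
+ * char *in = args[0], *out = args[1];
+ * for (i = 0; i < n; i++, in += strides[0], out += strides[1]) {
+ * *(double *)out = -*(double *)in;
+ * }
+ * }
+ */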
+
+/*
+ * The most generic one-dimensional inner loop for
+ * a masked standard element-wise ufunc. "Masked" here means that it skips
+ * doing calculations on any items for which the maskptr array has a true
+ * value.
+ */
+typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
+ char **dataptrs, npy_intp *strides,
+ char *maskptr, npy_intp mask_stride,
+ npy_intp count,
+ NpyAuxData *innerloopdata);
+
+/* Forward declaration for the type resolver and loop selector typedefs */
+struct _tagPyUFuncObject;
+
+/*
+ * Given the operands for calling a ufunc, should determine the
+ * calculation input and output data types and return an inner loop function.
+ * This function should validate that the casting rule is being followed,
+ * and fail if it is not.
+ *
+ * For backwards compatibility, the regular type resolution function does not
+ * support auxiliary data with object semantics. The type resolution call
+ * which returns a masked generic function returns a standard NpyAuxData
+ * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
+ * work.
+ *
+ * ufunc: The ufunc object.
+ * casting: The 'casting' parameter provided to the ufunc.
+ * operands: An array of length (ufunc->nin + ufunc->nout),
+ * with the output parameters possibly NULL.
+ * type_tup: Either NULL, or the type_tup passed to the ufunc.
+ * out_dtypes: An array which should be populated with new
+ * references to (ufunc->nin + ufunc->nout) new
+ * dtypes, one for each input and output. These
+ * dtypes should all be in native-endian format.
+ *
+ * Should return 0 on success, -1 on failure (with exception set),
+ * or -2 if Py_NotImplemented should be returned.
+ */
+typedef int (PyUFunc_TypeResolutionFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
+/*
+ * This is the signature for the functions that may be assigned to the
+ * `process_core_dims_func` field of the PyUFuncObject structure.
+ * Implementation of this function is optional. This function is only used
+ * by generalized ufuncs (i.e. those with the field `core_enabled` set to 1).
+ * The function is called by the ufunc during the processing of the arguments
+ * of a call of the ufunc. The function can check the core dimensions of the
+ * input and output arrays and return -1 with an exception set if any
+ * requirements are not satisfied. If the caller of the ufunc didn't provide
+ * output arrays, the core dimensions associated with the output arrays (i.e.
+ * those that are not also used in input arrays) will have the value -1 in
+ * `core_dim_sizes`. This function can replace any output core dimensions
+ * that are -1 with a value that is appropriate for the ufunc.
+ *
+ * Parameter Description
+ * --------------- ------------------------------------------------------
+ * ufunc The ufunc object
+ * core_dim_sizes An array with length `ufunc->core_num_dim_ix`.
+ * The core dimensions of the arrays passed to the ufunc
+ * will have been set. If the caller of the ufunc didn't
+ * provide the output array(s), the output-only core
+ * dimensions will have the value -1.
+ *
+ * The function must not change any element in `core_dim_sizes` that is
+ * not -1 on input. Doing so will result in incorrect output from the
+ * ufunc, and could result in a crash of the Python interpreter.
+ *
+ * The function must return 0 on success, -1 on failure (with an exception
+ * set).
+ */
+typedef int (PyUFunc_ProcessCoreDimsFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ npy_intp *core_dim_sizes);
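+
+/*
+ * Illustrative sketch (hypothetical names, not part of this header): for a
+ * gufunc with signature "(m),(n)->(p)" computing a full convolution, the
+ * hook below would fix an unset output dimension as p = m + n - 1:
+ *
+ * static int
+ * conv1d_process_core_dims(PyUFuncObject *ufunc, npy_intp *core_dim_sizes)
+ * {
+ * npy_intp m = core_dim_sizes[0];
+ * npy_intp n = core_dim_sizes[1];
+ * npy_intp p = core_dim_sizes[2];
+ * if (p == -1) {
+ * core_dim_sizes[2] = m + n - 1; // only -1 entries may be changed
+ * return 0;
+ * }
+ * if (p != m + n - 1) {
+ * PyErr_SetString(PyExc_ValueError,
+ * "output core dimension must equal m + n - 1");
+ * return -1;
+ * }
+ * return 0;
+ * }
+ */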
+
+typedef struct _tagPyUFuncObject {
+ PyObject_HEAD
+ /*
+ * nin: Number of inputs
+ * nout: Number of outputs
+ * nargs: Always nin + nout (Why is it stored?)
+ */
+ int nin, nout, nargs;
+
+ /*
+ * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
+ * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
+ * PyUFunc_IdentityValue.
+ */
+ int identity;
+
+ /* Array of one-dimensional core loops */
+ PyUFuncGenericFunction *functions;
+ /* Array of funcdata that gets passed into the functions */
+ void *const *data;
+ /* The number of elements in 'functions' and 'data' */
+ int ntypes;
+
+ /* Used to be unused field 'check_return' */
+ int reserved1;
+
+ /* The name of the ufunc */
+ const char *name;
+
+ /* Array of type numbers, of size ('nargs' * 'ntypes') */
+ const char *types;
+
+ /* Documentation string */
+ const char *doc;
+
+ void *ptr;
+ PyObject *obj;
+ PyObject *userloops;
+
+ /* generalized ufunc parameters */
+
+ /* 0 for scalar ufunc; 1 for generalized ufunc */
+ int core_enabled;
+ /* number of distinct dimension names in signature */
+ int core_num_dim_ix;
+
+ /*
+ * dimension indices of input/output argument k are stored in
+ * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
+ */
+
+ /* numbers of core dimensions of each argument */
+ int *core_num_dims;
+ /*
+ * dimension indices in a flattened form; indices
+ * are in the range of [0,core_num_dim_ix)
+ */
+ int *core_dim_ixs;
+ /*
+ * positions of 1st core dimensions of each
+ * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
+ */
+ int *core_offsets;
+ /* signature string for printing purposes */
+ char *core_signature;
+
+ /*
+ * A function which resolves the types and fills an array
+ * with the dtypes for the inputs and outputs.
+ */
+ PyUFunc_TypeResolutionFunc *type_resolver;
+
+ /* A dictionary to monkeypatch ufuncs */
+ PyObject *dict;
+
+ /*
+ * This was blocked off to be the "new" inner loop selector in 1.7,
+ * but this was never implemented. (This is also why the above
+ * selector is called the "legacy" selector.)
+ */
+ #ifndef Py_LIMITED_API
+ vectorcallfunc vectorcall;
+ #else
+ void *vectorcall;
+ #endif
+
+ /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */
+ void *reserved3;
+
+ /*
+ * List of flags for each operand when ufunc is called by nditer object.
+ * These flags will be used in addition to the default flags for each
+ * operand set by nditer object.
+ */
+ npy_uint32 *op_flags;
+
+ /*
+ * List of global flags used when ufunc is called by nditer object.
+ * These flags will be used in addition to the default global flags
+ * set by nditer object.
+ */
+ npy_uint32 iter_flags;
+
+ /* New in NPY_API_VERSION 0x0000000D and above */
+ #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
+ /*
+ * for each core_num_dim_ix distinct dimension names,
+ * the possible "frozen" size (-1 if not frozen).
+ */
+ npy_intp *core_dim_sizes;
+
+ /*
+ * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
+ */
+ npy_uint32 *core_dim_flags;
+
+ /* Identity for reduction, when identity == PyUFunc_IdentityValue */
+ PyObject *identity_value;
+ #endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */
+
+ /* New in NPY_API_VERSION 0x0000000F and above */
+ #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+ /* New private fields related to dispatching */
+ void *_dispatch_cache;
+ /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
+ PyObject *_loops;
+ #endif
+ #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION
+ /*
+ * Optional function to process core dimensions of a gufunc.
+ */
+ PyUFunc_ProcessCoreDimsFunc *process_core_dims_func;
+ #endif
+} PyUFuncObject;
+
+#include "arrayobject.h"
+/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
+/* the core dimension's size will be determined by the operands. */
+#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
+/* the core dimension may be absent */
+#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
+/* flags inferred during execution */
+#define UFUNC_CORE_DIM_MISSING 0x00040000
+
+
+#define UFUNC_OBJ_ISOBJECT 1
+#define UFUNC_OBJ_NEEDS_API 2
+
+
+#if NPY_ALLOW_THREADS
+#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
+#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
+#else
+#define NPY_LOOP_BEGIN_THREADS
+#define NPY_LOOP_END_THREADS
+#endif
+
+/*
+ * UFunc has unit of 0, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_Zero 0
+/*
+ * UFunc has unit of 1, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_One 1
+/*
+ * UFunc has unit of -1, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once. Intended for
+ * bitwise_and reduction.
+ */
+#define PyUFunc_MinusOne 2
+/*
+ * UFunc has no unit, and the order of operations cannot be reordered.
+ * This case does not allow reduction with multiple axes at once.
+ */
+#define PyUFunc_None -1
+/*
+ * UFunc has no unit, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_ReorderableNone -2
+/*
+ * UFunc unit is an identity_value, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_IdentityValue -3
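+
+/*
+ * For example, np.add uses PyUFunc_Zero and np.multiply uses PyUFunc_One,
+ * which is what allows add.reduce and multiply.reduce to collapse several
+ * axes in a single call.
+ */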
+
+
+#define UFUNC_REDUCE 0
+#define UFUNC_ACCUMULATE 1
+#define UFUNC_REDUCEAT 2
+#define UFUNC_OUTER 3
+
+
+typedef struct {
+ int nin;
+ int nout;
+ PyObject *callable;
+} PyUFunc_PyFuncData;
+
+/* A linked-list of function information for
+ user-defined 1-d loops.
+ */
+typedef struct _loop1d_info {
+ PyUFuncGenericFunction func;
+ void *data;
+ int *arg_types;
+ struct _loop1d_info *next;
+ int nargs;
+ PyArray_Descr **arg_dtypes;
+} PyUFunc_Loop1d;
+
+
+#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
+
+/* THESE MACROS ARE DEPRECATED.
+ * Use npy_set_floatstatus_* in the npymath library.
+ */
+#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
+#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
+#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
+#define UFUNC_FPE_INVALID NPY_FPE_INVALID
+
+/* Make sure it gets defined if it isn't already */
+#ifndef UFUNC_NOFPE
+/* Clear the floating point exception default of Borland C++ */
+#if defined(__BORLANDC__)
+#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
+#else
+#define UFUNC_NOFPE
+#endif
+#endif
+
+#include "__ufunc_api.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/utils.h b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/utils.h
new file mode 100644
index 00000000..97f06092
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/include/numpy/utils.h
@@ -0,0 +1,37 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+
+#ifndef __COMP_NPY_UNUSED
+ #if defined(__GNUC__)
+ #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ #elif defined(__ICC)
+ #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ #elif defined(__clang__)
+ #define __COMP_NPY_UNUSED __attribute__ ((unused))
+ #else
+ #define __COMP_NPY_UNUSED
+ #endif
+#endif
+
+#if defined(__GNUC__) || defined(__ICC) || defined(__clang__)
+ #define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
+#elif defined(_MSC_VER)
+ #define NPY_DECL_ALIGNED(x) __declspec(align(x))
+#else
+ #define NPY_DECL_ALIGNED(x)
+#endif
+
+/* Use this to tag a variable as not used. It will remove the unused-variable
+ * warning on supported platforms (see __COMP_NPY_UNUSED) and mangle the
+ * variable to avoid accidental use */
+#define NPY_UNUSED(x) __NPY_UNUSED_TAGGED ## x __COMP_NPY_UNUSED
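+/* For example, `void f(int NPY_UNUSED(n))` expands on GCC to
+ * `void f(int __NPY_UNUSED_TAGGEDn __attribute__ ((__unused__)))`. */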
+#define NPY_EXPAND(x) x
+
+#define NPY_STRINGIFY(x) #x
+#define NPY_TOSTRING(x) NPY_STRINGIFY(x)
+
+#define NPY_CAT__(a, b) a ## b
+#define NPY_CAT_(a, b) NPY_CAT__(a, b)
+#define NPY_CAT(a, b) NPY_CAT_(a, b)
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/lib/libnpymath.a b/.venv/lib/python3.12/site-packages/numpy/_core/lib/libnpymath.a
new file mode 100644
index 00000000..f8d561d2
Binary files /dev/null and b/.venv/lib/python3.12/site-packages/numpy/_core/lib/libnpymath.a differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/mlib.ini b/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/mlib.ini
new file mode 100644
index 00000000..5840f5e1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/mlib.ini
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=-lm
+Cflags=
+
+[msvc]
+Libs=m.lib
+Cflags=
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/npymath.ini b/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/npymath.ini
new file mode 100644
index 00000000..8d879e3f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/lib/npy-pkg-config/npymath.ini
@@ -0,0 +1,20 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+pkgname=numpy._core
+prefix=${pkgdir}
+libdir=${prefix}/lib
+includedir=${prefix}/include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/lib/pkgconfig/numpy.pc b/.venv/lib/python3.12/site-packages/numpy/_core/lib/pkgconfig/numpy.pc
new file mode 100644
index 00000000..060441ee
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/lib/pkgconfig/numpy.pc
@@ -0,0 +1,7 @@
+prefix=${pcfiledir}/../..
+includedir=${prefix}/include
+
+Name: numpy
+Description: NumPy is the fundamental package for scientific computing with Python.
+Version: 2.3.3
+Cflags: -I${includedir}
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/memmap.py b/.venv/lib/python3.12/site-packages/numpy/_core/memmap.py
new file mode 100644
index 00000000..8cfa7f94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/memmap.py
@@ -0,0 +1,363 @@
+import operator
+from contextlib import nullcontext
+
+import numpy as np
+from numpy._utils import set_module
+
+from .numeric import dtype, ndarray, uint8
+
+__all__ = ['memmap']
+
+dtypedescr = dtype
+valid_filemodes = ["r", "c", "r+", "w+"]
+writeable_filemodes = ["r+", "w+"]
+
+mode_equivalents = {
+ "readonly": "r",
+ "copyonwrite": "c",
+ "readwrite": "r+",
+ "write": "w+"
+ }
+
+
+@set_module('numpy')
+class memmap(ndarray):
+ """Create a memory-map to an array stored in a *binary* file on disk.
+
+ Memory-mapped files are used for accessing small segments of large files
+ on disk, without reading the entire file into memory. NumPy's
+ memmaps are array-like objects. This differs from Python's ``mmap``
+ module, which uses file-like objects.
+
+ This subclass of ndarray has some unpleasant interactions with
+ some operations, because it doesn't quite fit properly as a subclass.
+ An alternative to using this subclass is to create the ``mmap``
+ object yourself, then create an ndarray with ndarray.__new__ directly,
+ passing the object created in its 'buffer=' parameter.
+
+ This class may at some point be turned into a factory function
+ which returns a view into an mmap buffer.
+
+ Flush the memmap instance to write the changes to the file. Currently there
+ is no API to close the underlying ``mmap``. It is tricky to ensure the
+ resource is actually closed, since it may be shared between different
+ memmap instances.
+
+
+ Parameters
+ ----------
+ filename : str, file-like object, or pathlib.Path instance
+ The file name or file object to be used as the array data buffer.
+ dtype : data-type, optional
+ The data-type used to interpret the file contents.
+ Default is `uint8`.
+ mode : {'r+', 'r', 'w+', 'c'}, optional
+ The file is opened in this mode:
+
+ +------+-------------------------------------------------------------+
+ | 'r' | Open existing file for reading only. |
+ +------+-------------------------------------------------------------+
+ | 'r+' | Open existing file for reading and writing. |
+ +------+-------------------------------------------------------------+
+ | 'w+' | Create or overwrite existing file for reading and writing. |
+ | | If ``mode == 'w+'`` then `shape` must also be specified. |
+ +------+-------------------------------------------------------------+
+ | 'c' | Copy-on-write: assignments affect data in memory, but |
+ | | changes are not saved to disk. The file on disk is |
+ | | read-only. |
+ +------+-------------------------------------------------------------+
+
+ Default is 'r+'.
+ offset : int, optional
+ In the file, array data starts at this offset. Since `offset` is
+ measured in bytes, it should normally be a multiple of the byte-size
+ of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
+ file are valid; The file will be extended to accommodate the
+ additional data. By default, ``memmap`` will start at the beginning of
+ the file, even if ``filename`` is a file pointer ``fp`` and
+ ``fp.tell() != 0``.
+ shape : int or sequence of ints, optional
+ The desired shape of the array. If ``mode == 'r'`` and the number
+ of remaining bytes after `offset` is not a multiple of the byte-size
+ of `dtype`, you must specify `shape`. By default, the returned array
+ will be 1-D with the number of elements determined by file size
+ and data-type.
+
+ .. versionchanged:: 2.0
+ The shape parameter can now be any integer sequence type, previously
+ types were limited to tuple and int.
+
+ order : {'C', 'F'}, optional
+ Specify the order of the ndarray memory layout:
+ :term:`row-major`, C-style or :term:`column-major`,
+ Fortran-style. This only has an effect if the shape is
+ greater than 1-D. The default order is 'C'.
+
+ Attributes
+ ----------
+ filename : str or pathlib.Path instance
+ Path to the mapped file.
+ offset : int
+ Offset position in the file.
+ mode : str
+ File mode.
+
+ Methods
+ -------
+ flush
+ Flush any changes in memory to file on disk.
+ When you delete a memmap object, flush is called first to write
+ changes to disk.
+
+
+ See also
+ --------
+ lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+ Notes
+ -----
+ The memmap object can be used anywhere an ndarray is accepted.
+ Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
+ ``True``.
+
+ Memory-mapped files cannot be larger than 2GB on 32-bit systems.
+
+ When a memmap causes a file to be created or extended beyond its
+ current size in the filesystem, the contents of the new part are
+ unspecified. On systems with POSIX filesystem semantics, the extended
+ part will be filled with zero bytes.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> data = np.arange(12, dtype='float32')
+ >>> data.resize((3,4))
+
+ This example uses a temporary file so that doctest doesn't write
+ files to your directory. You would use a 'normal' filename.
+
+ >>> from tempfile import mkdtemp
+ >>> import os.path as path
+ >>> filename = path.join(mkdtemp(), 'newfile.dat')
+
+ Create a memmap with dtype and shape that matches our data:
+
+ >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
+ >>> fp
+ memmap([[0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.]], dtype=float32)
+
+ Write data to memmap array:
+
+ >>> fp[:] = data[:]
+ >>> fp
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ >>> fp.filename == path.abspath(filename)
+ True
+
+ Flush memory changes to disk so that they can be read back:
+
+ >>> fp.flush()
+
+ Load the memmap and verify data was stored:
+
+ >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
+ >>> newfp
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ Read-only memmap:
+
+ >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
+ >>> fpr.flags.writeable
+ False
+
+ Copy-on-write memmap:
+
+ >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
+ >>> fpc.flags.writeable
+ True
+
+ It's possible to assign to a copy-on-write array, but values are only
+ written into the memory copy of the array, and not written to disk:
+
+ >>> fpc
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+ >>> fpc[0,:] = 0
+ >>> fpc
+ memmap([[ 0., 0., 0., 0.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ File on disk is unchanged:
+
+ >>> fpr
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ Offset into a memmap:
+
+ >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
+ >>> fpo
+ memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
+
+ """
+
+ __array_priority__ = -100.0
+
+ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
+ shape=None, order='C'):
+ # Import here to minimize 'import numpy' overhead
+ import mmap
+ import os.path
+ try:
+ mode = mode_equivalents[mode]
+ except KeyError as e:
+ if mode not in valid_filemodes:
+ all_modes = valid_filemodes + list(mode_equivalents.keys())
+ raise ValueError(
+ f"mode must be one of {all_modes!r} (got {mode!r})"
+ ) from None
+
+ if mode == 'w+' and shape is None:
+ raise ValueError("shape must be given if mode == 'w+'")
+
+ if hasattr(filename, 'read'):
+ f_ctx = nullcontext(filename)
+ else:
+ f_ctx = open(
+ os.fspath(filename),
+ ('r' if mode == 'c' else mode) + 'b'
+ )
+
+ with f_ctx as fid:
+ fid.seek(0, 2)
+ flen = fid.tell()
+ descr = dtypedescr(dtype)
+ _dbytes = descr.itemsize
+
+ if shape is None:
+ bytes = flen - offset
+ if bytes % _dbytes:
+ raise ValueError("Size of available data is not a "
+ "multiple of the data-type size.")
+ size = bytes // _dbytes
+ shape = (size,)
+ else:
+ if not isinstance(shape, (tuple, list)):
+ try:
+ shape = [operator.index(shape)]
+ except TypeError:
+ pass
+ shape = tuple(shape)
+ size = np.intp(1) # avoid overflows
+ for k in shape:
+ size *= k
+
+ bytes = int(offset + size * _dbytes)
+
+ if mode in ('w+', 'r+'):
+ # gh-27723
+ # if bytes == 0, we write out 1 byte to allow empty memmap.
+ bytes = max(bytes, 1)
+ if flen < bytes:
+ fid.seek(bytes - 1, 0)
+ fid.write(b'\0')
+ fid.flush()
+
+ if mode == 'c':
+ acc = mmap.ACCESS_COPY
+ elif mode == 'r':
+ acc = mmap.ACCESS_READ
+ else:
+ acc = mmap.ACCESS_WRITE
+
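+ # mmap requires its offset to be a multiple of ALLOCATIONGRANULARITY,
+ # so align down here and index into the mapping via array_offset below.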
+ start = offset - offset % mmap.ALLOCATIONGRANULARITY
+ bytes -= start
+ # bytes == 0 is problematic as in mmap length=0 maps the full file.
+ # See PR gh-27723 for a more detailed explanation.
+ if bytes == 0 and start > 0:
+ bytes += mmap.ALLOCATIONGRANULARITY
+ start -= mmap.ALLOCATIONGRANULARITY
+ array_offset = offset - start
+ mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
+
+ self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
+ offset=array_offset, order=order)
+ self._mmap = mm
+ self.offset = offset
+ self.mode = mode
+
+ if isinstance(filename, os.PathLike):
+ # special case - if we were constructed with a pathlib.path,
+ # then filename is a path object, not a string
+ self.filename = filename.resolve()
+ elif hasattr(fid, "name") and isinstance(fid.name, str):
+ # py3 returns int for TemporaryFile().name
+ self.filename = os.path.abspath(fid.name)
+ # same as memmap copies (e.g. memmap + 1)
+ else:
+ self.filename = None
+
+ return self
+
+ def __array_finalize__(self, obj):
+ if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
+ self._mmap = obj._mmap
+ self.filename = obj.filename
+ self.offset = obj.offset
+ self.mode = obj.mode
+ else:
+ self._mmap = None
+ self.filename = None
+ self.offset = None
+ self.mode = None
+
+ def flush(self):
+ """
+ Write any changes in the array to the file on disk.
+
+ For further information, see `memmap`.
+
+ Parameters
+ ----------
+ None
+
+ See Also
+ --------
+ memmap
+
+ """
+ if self.base is not None and hasattr(self.base, 'flush'):
+ self.base.flush()
+
+ def __array_wrap__(self, arr, context=None, return_scalar=False):
+ arr = super().__array_wrap__(arr, context)
+
+ # Return a memmap if a memmap was given as the output of the
+ # ufunc. Leave the arr class unchanged if self is not a memmap
+ # to keep original memmap subclasses behavior
+ if self is arr or type(self) is not memmap:
+ return arr
+
+ # Return scalar instead of 0d memmap, e.g. for np.sum with
+ # axis=None (note that subclasses will not reach here)
+ if return_scalar:
+ return arr[()]
+
+ # Return ndarray otherwise
+ return arr.view(np.ndarray)
+
+ def __getitem__(self, index):
+ res = super().__getitem__(index)
+ if type(res) is memmap and res._mmap is None:
+ return res.view(type=ndarray)
+ return res
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/memmap.pyi b/.venv/lib/python3.12/site-packages/numpy/_core/memmap.pyi
new file mode 100644
index 00000000..0b313284
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/memmap.pyi
@@ -0,0 +1,3 @@
+from numpy import memmap
+
+__all__ = ["memmap"]
diff --git a/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.py b/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.py
new file mode 100644
index 00000000..236ca7e7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/_core/multiarray.py
@@ -0,0 +1,1762 @@
+"""
+Create the numpy._core.multiarray namespace for backward compatibility.
+In v1.16 the multiarray and umath c-extension modules were merged into
+a single _multiarray_umath extension module. So we replicate the old
+namespace by importing from the extension module.
+
+"""
+
+import functools
+
+from . import _multiarray_umath, overrides
+from ._multiarray_umath import * # noqa: F403
+
+# These imports are needed for backward compatibility,
+# do not change them. issue gh-15518
+# _get_ndarray_c_version is semi-public, on purpose not added to __all__
+from ._multiarray_umath import ( # noqa: F401
+ _ARRAY_API,
+ _flagdict,
+ _get_madvise_hugepage,
+ _get_ndarray_c_version,
+ _monotonicity,
+ _place,
+ _reconstruct,
+ _set_madvise_hugepage,
+ _vec_string,
+ from_dlpack,
+)
+
+__all__ = [
+ '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
+ 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
+ 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
+ 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP',
+ '_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string',
+ '_monotonicity', 'add_docstring', 'arange', 'array', 'asarray',
+ 'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount',
+ 'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
+ 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
+ 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
+ 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
+ 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
+ 'frombuffer', 'fromfile', 'fromiter', 'fromstring',
+ 'get_handler_name', 'get_handler_version', 'inner', 'interp',
+ 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'vecdot',
+ 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters',
+ 'normalize_axis_index', 'packbits', 'promote_types', 'putmask',
+ 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function',
+ 'set_typeDict', 'shares_memory', 'typeinfo',
+ 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros']
+
+# For backward compatibility, make sure pickle imports
+# these functions from here
+_reconstruct.__module__ = 'numpy._core.multiarray'
+scalar.__module__ = 'numpy._core.multiarray'
+
+
+from_dlpack.__module__ = 'numpy'
+arange.__module__ = 'numpy'
+array.__module__ = 'numpy'
+asarray.__module__ = 'numpy'
+asanyarray.__module__ = 'numpy'
+ascontiguousarray.__module__ = 'numpy'
+asfortranarray.__module__ = 'numpy'
+datetime_data.__module__ = 'numpy'
+empty.__module__ = 'numpy'
+frombuffer.__module__ = 'numpy'
+fromfile.__module__ = 'numpy'
+fromiter.__module__ = 'numpy'
+frompyfunc.__module__ = 'numpy'
+fromstring.__module__ = 'numpy'
+may_share_memory.__module__ = 'numpy'
+nested_iters.__module__ = 'numpy'
+promote_types.__module__ = 'numpy'
+zeros.__module__ = 'numpy'
+normalize_axis_index.__module__ = 'numpy.lib.array_utils'
+add_docstring.__module__ = 'numpy.lib'
+compare_chararrays.__module__ = 'numpy.char'
+
+
+def _override___module__():
+ namespace_names = globals()
+ for ufunc_name in [
+ 'absolute', 'arccos', 'arccosh', 'add', 'arcsin', 'arcsinh', 'arctan',
+ 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'invert',
+ 'left_shift', 'bitwise_or', 'right_shift', 'bitwise_xor', 'cbrt',
+ 'ceil', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees',
+ 'divide', 'divmod', 'equal', 'exp', 'exp2', 'expm1', 'fabs',
+ 'float_power', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod',
+ 'frexp', 'gcd', 'greater', 'greater_equal', 'heaviside', 'hypot',
+ 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less',
+ 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
+ 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
+ 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder',
+ 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive',
+ 'power', 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit',
+ 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh',
+ 'trunc', 'vecdot', 'vecmat',
+ ]:
+ ufunc = namespace_names[ufunc_name]
+ ufunc.__module__ = "numpy"
+ ufunc.__qualname__ = ufunc_name
+
+
+_override___module__()
+
+
+# We can't verify dispatcher signatures because NumPy's C functions don't
+# support introspection.
+array_function_from_c_func_and_dispatcher = functools.partial(
+ overrides.array_function_from_dispatcher,
+ module='numpy', docs_from_dispatcher=True, verify=False)
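+
+# Each function decorated below is only a *dispatcher*: its body returns the
+# tuple of arguments that may define __array_function__ (hence the bare
+# `return (prototype,)` bodies), while the C function of the same name in
+# _multiarray_umath does the actual work.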
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
+def empty_like(
+ prototype, dtype=None, order=None, subok=None, shape=None, *, device=None
+):
+ """
+ empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *,
+ device=None)
+
+ Return a new array with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ prototype : array_like
+ The shape and data-type of `prototype` define these same attributes
+ of the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `prototype` is Fortran
+ contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
+ as closely as possible.
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of `prototype`, otherwise it will be a base-class array. Defaults
+ to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+ device : str, optional
+ The device on which to place the created array. Default: None.
+ For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+ .. versionadded:: 2.0.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of uninitialized (arbitrary) data with the same
+ shape and type as `prototype`.
+
+ See Also
+ --------
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+
+ Notes
+ -----
+ Unlike other array creation functions (e.g. `zeros_like`, `ones_like`,
+ `full_like`), `empty_like` does not initialize the values of the array,
+ and may therefore be marginally faster. However, the values stored in the
+ newly allocated array are arbitrary. For reproducible behavior, be sure
+ to set each element of the array before reading.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = ([1,2,3], [4,5,6]) # a is array-like
+ >>> np.empty_like(a)
+ array([[-1073741821, -1073741821, 3], # uninitialized
+ [ 0, 0, -1073741821]])
+ >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
+ >>> np.empty_like(a)
+ array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
+ [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
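+
+ Since the contents are arbitrary, a simple way to make the result safe
+ to read is to fill it before use:
+
+ >>> b = np.empty_like(a)
+ >>> b[...] = 0.0 # initialize before reading
+ >>> b
+ array([[0., 0., 0.],
+ [0., 0., 0.]])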
+
+ """
+ return (prototype,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
+def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
+ """
+ concatenate(
+ (a1, a2, ...),
+ axis=0,
+ out=None,
+ dtype=None,
+ casting="same_kind"
+ )
+
+ Join a sequence of arrays along an existing axis.
+
+ Parameters
+ ----------
+ a1, a2, ... : sequence of array_like
+ The arrays must have the same shape, except in the dimension
+ corresponding to `axis` (the first, by default).
+ axis : int, optional
+ The axis along which the arrays will be joined. If axis is None,
+ arrays are flattened before use. Default is 0.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what concatenate would have returned if no
+ out argument were specified.
+ dtype : str or dtype
+ If provided, the destination array will have this dtype. Cannot be
+ provided together with `out`.
+
+ .. versionadded:: 1.20.0
+
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'same_kind'.
+ For a description of the options, please see :term:`casting`.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ res : ndarray
+ The concatenated array.
+
+ See Also
+ --------
+ ma.concatenate : Concatenate function that preserves input masks.
+ array_split : Split an array into multiple sub-arrays of equal or
+ near-equal size.
+ split : Split array into a list of multiple sub-arrays of equal size.
+ hsplit : Split array into multiple sub-arrays horizontally (column wise).
+ vsplit : Split array into multiple sub-arrays vertically (row wise).
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+ stack : Stack a sequence of arrays along a new axis.
+ block : Assemble arrays from blocks.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ vstack : Stack arrays in sequence vertically (row wise).
+ dstack : Stack arrays in sequence depth wise (along third dimension).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+
+ Notes
+ -----
+ When one or more of the arrays to be concatenated is a MaskedArray,
+ this function will return a MaskedArray object instead of an ndarray,
+ but the input masks are *not* preserved. In cases where a MaskedArray
+ is expected as input, use the ma.concatenate function from the masked
+ array module instead.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> b = np.array([[5, 6]])
+ >>> np.concatenate((a, b), axis=0)
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ >>> np.concatenate((a, b.T), axis=1)
+ array([[1, 2, 5],
+ [3, 4, 6]])
+ >>> np.concatenate((a, b), axis=None)
+ array([1, 2, 3, 4, 5, 6])
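+
+ The ``dtype`` argument casts the inputs while concatenating, subject to
+ the ``casting`` rule:
+
+ >>> np.concatenate((a, b), axis=0, dtype=np.float64)
+ array([[1., 2.],
+ [3., 4.],
+ [5., 6.]])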
+
+ This function will not preserve masking of MaskedArray inputs.
+
+ >>> a = np.ma.arange(3)
+ >>> a[1] = np.ma.masked
+ >>> b = np.arange(2, 5)
+ >>> a
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)
+ >>> b
+ array([2, 3, 4])
+ >>> np.concatenate([a, b])
+ masked_array(data=[0, 1, 2, 2, 3, 4],
+ mask=False,
+ fill_value=999999)
+ >>> np.ma.concatenate([a, b])
+ masked_array(data=[0, --, 2, 2, 3, 4],
+ mask=[False, True, False, False, False, False],
+ fill_value=999999)
+
+ """
+ if out is not None:
+ # optimize for the typical case where only arrays is provided
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
+def inner(a, b):
+ """
+ inner(a, b, /)
+
+ Inner product of two arrays.
+
+ Ordinary inner product of vectors for 1-D arrays (without complex
+ conjugation), in higher dimensions a sum product over the last axes.
+
+ Parameters
+ ----------
+ a, b : array_like
+ If `a` and `b` are nonscalar, their last dimensions must match.
+
+ Returns
+ -------
+ out : ndarray
+ If `a` and `b` are both
+ scalars or both 1-D arrays then a scalar is returned; otherwise
+ an array is returned.
+ ``out.shape = (*a.shape[:-1], *b.shape[:-1])``
+
+ Raises
+ ------
+ ValueError
+ If both `a` and `b` are nonscalar and their last dimensions have
+ different sizes.
+
+ See Also
+ --------
+ tensordot : Sum products over arbitrary axes.
+ dot : Generalised matrix product, using second last dimension of `b`.
+ vecdot : Vector dot product of two arrays.
+ einsum : Einstein summation convention.
+
+ Notes
+ -----
+ For vectors (1-D arrays) it computes the ordinary inner-product::
+
+ np.inner(a, b) = sum(a[:]*b[:])
+
+ More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``::
+
+ np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
+
+ or explicitly::
+
+ np.inner(a, b)[i0,...,ir-2,j0,...,js-2]
+ = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])
+
+ In addition `a` or `b` may be scalars, in which case::
+
+ np.inner(a,b) = a*b
+
+ Examples
+ --------
+ Ordinary inner product for vectors:
+
+ >>> import numpy as np
+ >>> a = np.array([1,2,3])
+ >>> b = np.array([0,1,0])
+ >>> np.inner(a, b)
+ 2
+
+ Some multidimensional examples:
+
+ >>> a = np.arange(24).reshape((2,3,4))
+ >>> b = np.arange(4)
+ >>> c = np.inner(a, b)
+ >>> c.shape
+ (2, 3)
+ >>> c
+ array([[ 14, 38, 62],
+ [ 86, 110, 134]])
+
+ >>> a = np.arange(2).reshape((1,1,2))
+ >>> b = np.arange(6).reshape((3,2))
+ >>> c = np.inner(a, b)
+ >>> c.shape
+ (1, 1, 3)
+ >>> c
+ array([[[1, 3, 5]]])
+
+ An example where `b` is a scalar:
+
+ >>> np.inner(np.eye(2), 7)
+ array([[7., 0.],
+ [0., 7.]])
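+
+ The ``tensordot`` identity given in the Notes can be checked directly:
+
+ >>> a = np.arange(24).reshape((2,3,4))
+ >>> b = np.arange(8).reshape((2,4))
+ >>> np.array_equal(np.inner(a, b), np.tensordot(a, b, axes=(-1,-1)))
+ True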
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
+def where(condition, x=None, y=None):
+ """
+ where(condition, [x, y], /)
+
+ Return elements chosen from `x` or `y` depending on `condition`.
+
+ .. note::
+ When only `condition` is provided, this function is a shorthand for
+ ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
+ preferred, as it behaves correctly for subclasses. The rest of this
+ documentation covers only the case where all three arguments are
+ provided.
+
+ Parameters
+ ----------
+ condition : array_like, bool
+ Where True, yield `x`, otherwise yield `y`.
+ x, y : array_like
+ Values from which to choose. `x`, `y` and `condition` need to be
+ broadcastable to some shape.
+
+ Returns
+ -------
+ out : ndarray
+ An array with elements from `x` where `condition` is True, and elements
+ from `y` elsewhere.
+
+ See Also
+ --------
+ choose
+ nonzero : The function that is called when x and y are omitted
+
+ Notes
+ -----
+ If all the arrays are 1-D, `where` is equivalent to::
+
+ [xv if c else yv
+ for c, xv, yv in zip(condition, x, y)]
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.where(a < 5, a, 10*a)
+ array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
+
+ This can be used on multidimensional arrays too:
+
+ >>> np.where([[True, False], [True, True]],
+ ... [[1, 2], [3, 4]],
+ ... [[9, 8], [7, 6]])
+ array([[1, 8],
+ [3, 4]])
+
+ The shapes of x, y, and the condition are broadcast together:
+
+ >>> x, y = np.ogrid[:3, :4]
+ >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
+ array([[10, 0, 0, 0],
+ [10, 11, 1, 1],
+ [10, 11, 12, 2]])
+
+ >>> a = np.array([[0, 1, 2],
+ ... [0, 2, 4],
+ ... [0, 3, 6]])
+ >>> np.where(a < 4, a, -1) # -1 is broadcast
+ array([[ 0, 1, 2],
+ [ 0, 2, -1],
+ [ 0, 3, -1]])
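+
+ The list-comprehension equivalence from the Notes holds for 1-D inputs:
+
+ >>> np.where([True, False, True], [1, 2, 3], [10, 20, 30])
+ array([ 1, 20, 3])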
+ """
+ return (condition, x, y)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
+def lexsort(keys, axis=None):
+ """
+ lexsort(keys, axis=-1)
+
+ Perform an indirect stable sort using a sequence of keys.
+
+ Given multiple sorting keys, lexsort returns an array of integer indices
+ that describes the sort order by multiple keys. The last key in the
+ sequence is used for the primary sort order; ties are broken by the
+ second-to-last key, and so on.
+
+ Parameters
+ ----------
+ keys : (k, m, n, ...) array-like
+ The `k` keys to be sorted. The *last* key (e.g, the last
+ row if `keys` is a 2D array) is the primary sort key.
+ Each element of `keys` along the zeroth axis must be
+ an array-like object of the same shape.
+ axis : int, optional
+ Axis to be indirectly sorted. By default, sort over the last axis
+ of each sequence. Separate slices along `axis` are sorted
+ independently; see the last example.
+
+ Returns
+ -------
+ indices : (m, n, ...) ndarray of ints
+ Array of indices that sort the keys along the specified axis.
+
+ See Also
+ --------
+ argsort : Indirect sort.
+ ndarray.sort : In-place sort.
+ sort : Return a sorted copy of an array.
+
+ Examples
+ --------
+ Sort names: first by surname, then by name.
+
+ >>> import numpy as np
+ >>> surnames = ('Hertz', 'Galilei', 'Hertz')
+ >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
+ >>> ind = np.lexsort((first_names, surnames))
+ >>> ind
+ array([1, 2, 0])
+
+ >>> [surnames[i] + ", " + first_names[i] for i in ind]
+ ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
+
+ Sort according to two numerical keys, first by elements
+ of ``a``, then breaking ties according to elements of ``b``:
+
+ >>> a = [1, 5, 1, 4, 3, 4, 4] # First sequence
+ >>> b = [9, 4, 0, 4, 0, 2, 1] # Second sequence
+ >>> ind = np.lexsort((b, a)) # Sort by `a`, then by `b`
+ >>> ind
+ array([2, 0, 4, 6, 5, 3, 1])
+ >>> [(a[i], b[i]) for i in ind]
+ [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
+
+ Compare against `argsort`, which would sort each key independently.
+
+ >>> np.argsort((b, a), kind='stable')
+ array([[2, 4, 6, 5, 1, 3, 0],
+ [0, 2, 4, 3, 5, 6, 1]])
+
+ To sort lexicographically with `argsort`, we would need to provide a
+ structured array.
+
+ >>> x = np.array([(ai, bi) for ai, bi in zip(a, b)],
+ ... dtype = np.dtype([('x', int), ('y', int)]))
+ >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
+ array([2, 0, 4, 6, 5, 3, 1])
+
+ The zeroth axis of `keys` always corresponds with the sequence of keys,
+ so 2D arrays are treated just like other sequences of keys.
+
+ >>> arr = np.asarray([b, a])
+ >>> ind2 = np.lexsort(arr)
+ >>> np.testing.assert_equal(ind2, ind)
+
+ Accordingly, the `axis` parameter refers to an axis of *each* key, not of
+ the `keys` argument itself. For instance, the array ``arr`` is treated as
+ a sequence of two 1-D keys, so specifying ``axis=0`` is equivalent to
+ using the default axis, ``axis=-1``.
+
+ >>> np.testing.assert_equal(np.lexsort(arr, axis=0),
+ ... np.lexsort(arr, axis=-1))
+
+ For higher-dimensional arrays, the axis parameter begins to matter. The
+ resulting array has the same shape as each key, and the values are what
+ we would expect if `lexsort` were performed on corresponding slices
+ of the keys independently. For instance,
+
+ >>> x = [[1, 2, 3, 4],
+ ... [4, 3, 2, 1],
+ ... [2, 1, 4, 3]]
+ >>> y = [[2, 2, 1, 1],
+ ... [1, 2, 1, 2],
+ ... [1, 1, 2, 1]]
+ >>> np.lexsort((x, y), axis=1)
+ array([[2, 3, 0, 1],
+ [2, 0, 3, 1],
+ [1, 0, 3, 2]])
+
+ Each row of the result is what we would expect if we were to perform
+ `lexsort` on the corresponding row of the keys:
+
+ >>> for i in range(3):
+ ... print(np.lexsort((x[i], y[i])))
+ [2 3 0 1]
+ [2 0 3 1]
+ [1 0 3 2]
+
+ """
+ if isinstance(keys, tuple):
+ return keys
+ else:
+ return (keys,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
+def can_cast(from_, to, casting=None):
+ """
+ can_cast(from_, to, casting='safe')
+
+ Returns True if cast between data types can occur according to the
+ casting rule.
+
+ Parameters
+ ----------
+ from_ : dtype, dtype specifier, NumPy scalar, or array
+ Data type, NumPy scalar, or array to cast from.
+ to : dtype or dtype specifier
+ Data type to cast to.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Returns
+ -------
+ out : bool
+ True if cast can occur according to the casting rule.
+
+ Notes
+ -----
+ .. versionchanged:: 2.0
+ This function does not support Python scalars anymore and does not
+ apply any value-based logic for 0-D arrays and NumPy scalars.
+
+ See also
+ --------
+ dtype, result_type
+
+ Examples
+ --------
+ Basic examples
+
+ >>> import numpy as np
+ >>> np.can_cast(np.int32, np.int64)
+ True
+ >>> np.can_cast(np.float64, complex)
+ True
+ >>> np.can_cast(complex, float)
+ False
+
+ >>> np.can_cast('i8', 'f8')
+ True
+ >>> np.can_cast('i8', 'f4')
+ False
+ >>> np.can_cast('i4', 'S4')
+ False
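+
+ The ``casting`` argument selects among the rules listed above; for
+ example, only 'equiv' or a looser rule permits a byte-order change:
+
+ >>> np.can_cast('<i4', '>i4', casting='no')
+ False
+ >>> np.can_cast('<i4', '>i4', casting='equiv')
+ True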
+
+ """
+ return (from_,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
+def min_scalar_type(a):
+ """
+ min_scalar_type(a, /)
+
+ For scalar ``a``, returns the data type with the smallest size
+ and smallest scalar kind which can hold its value. For a non-scalar
+ array ``a``, returns its dtype unmodified.
+
+ Floating point values are not demoted to integers,
+ and complex values are not demoted to floats.
+
+ Parameters
+ ----------
+ a : scalar or array_like
+ The value whose minimal data type is to be found.
+
+ Returns
+ -------
+ out : dtype
+ The minimal data type.
+
+ See Also
+ --------
+ result_type, promote_types, dtype, can_cast
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.min_scalar_type(10)
+ dtype('uint8')
+
+ >>> np.min_scalar_type(-260)
+ dtype('int16')
+
+ >>> np.min_scalar_type(3.1)
+ dtype('float16')
+
+ >>> np.min_scalar_type(1e50)
+ dtype('float64')
+
+ >>> np.min_scalar_type(np.arange(4,dtype='f8'))
+ dtype('float64')
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
+def result_type(*arrays_and_dtypes):
+ """
+ result_type(*arrays_and_dtypes)
+
+ Returns the type that results from applying the NumPy
+ type promotion rules to the arguments.
+
+ Type promotion in NumPy works similarly to the rules in languages
+ like C++, with some slight differences. When both scalars and
+ arrays are used, the array's type takes precedence and the actual value
+ of the scalar is taken into account.
+
+ For example, calculating 3*a, where a is an array of 32-bit floats,
+ intuitively should result in a 32-bit float output. If the 3 is a
+ 32-bit integer, the NumPy rules indicate it can't convert losslessly
+ into a 32-bit float, so a 64-bit float should be the result type.
+ By examining the value of the constant, '3', we see that it fits in
+ an 8-bit integer, which can be cast losslessly into the 32-bit float.
+
+ Parameters
+ ----------
+ arrays_and_dtypes : list of arrays and dtypes
+ The operands of some operation whose result type is needed.
+
+ Returns
+ -------
+ out : dtype
+ The result type.
+
+ See also
+ --------
+ dtype, promote_types, min_scalar_type, can_cast
+
+ Notes
+ -----
+ The specific algorithm used is as follows.
+
+ Categories are determined by first checking which of boolean,
+ integer (int/uint), or floating point (float/complex) the maximum
+ kind of all the arrays and the scalars are.
+
+ If there are only scalars or the maximum category of the scalars
+ is higher than the maximum category of the arrays,
+ the data types are combined with :func:`promote_types`
+ to produce the return value.
+
+ Otherwise, `min_scalar_type` is called on each scalar, and
+ the resulting data types are all combined with :func:`promote_types`
+ to produce the return value.
+
+ The set of int values is not a subset of the uint values for types
+ with the same number of bits, something not reflected in
+ :func:`min_scalar_type`, but handled as a special case in `result_type`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.result_type(3, np.arange(7, dtype='i1'))
+ dtype('int8')
+
+ >>> np.result_type('i4', 'c8')
+ dtype('complex128')
+
+ >>> np.result_type(3.0, -2)
+ dtype('float64')
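+
+ The signed/unsigned special case from the Notes promotes same-size
+ signed and unsigned integers to a larger signed type:
+
+ >>> np.result_type(np.uint8, np.int8)
+ dtype('int16')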
+
+ """
+ return arrays_and_dtypes
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
+def dot(a, b, out=None):
+ """
+ dot(a, b, out=None)
+
+ Dot product of two arrays. Specifically,
+
+ - If both `a` and `b` are 1-D arrays, it is inner product of vectors
+ (without complex conjugation).
+
+ - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
+ but using :func:`matmul` or ``a @ b`` is preferred.
+
+ - If either `a` or `b` is 0-D (scalar), it is equivalent to
+ :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is
+ preferred.
+
+ - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
+ the last axis of `a` and `b`.
+
+ - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
+ sum product over the last axis of `a` and the second-to-last axis of
+ `b`::
+
+ dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
+
+ It uses an optimized BLAS library when possible (see `numpy.linalg`).
+
+ Parameters
+ ----------
+ a : array_like
+ First argument.
+ b : array_like
+ Second argument.
+ out : ndarray, optional
+ Output argument. This must have the exact kind that would be returned
+ if it were not used. In particular, it must have the right type, must be
+ C-contiguous, and its dtype must be the dtype that would be returned
+ for `dot(a,b)`. This is a performance feature. Therefore, if these
+ conditions are not met, an exception is raised, instead of attempting
+ to be flexible.
+
+ Returns
+ -------
+ output : ndarray
+ Returns the dot product of `a` and `b`. If `a` and `b` are both
+ scalars or both 1-D arrays then a scalar is returned; otherwise
+ an array is returned.
+ If `out` is given, then it is returned.
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` is not the same size as
+ the second-to-last dimension of `b`.
+
+ See Also
+ --------
+ vdot : Complex-conjugating dot product.
+ vecdot : Vector dot product of two arrays.
+ tensordot : Sum products over arbitrary axes.
+ einsum : Einstein summation convention.
+ matmul : '@' operator as method with out parameter.
+ linalg.multi_dot : Chained dot product.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.dot(3, 4)
+ 12
+
+ Neither argument is complex-conjugated:
+
+ >>> np.dot([2j, 3j], [2j, 3j])
+ (-13+0j)
+
+ For 2-D arrays it is the matrix product:
+
+ >>> a = [[1, 0], [0, 1]]
+ >>> b = [[4, 1], [2, 2]]
+ >>> np.dot(a, b)
+ array([[4, 1],
+ [2, 2]])
+
+ >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
+ >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
+ >>> np.dot(a, b)[2,3,2,1,2,2]
+ 499128
+ >>> sum(a[2,3,2,:] * b[1,2,:,2])
+ 499128
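+
+ When the ``out`` requirements described above are met, the result is
+ written into the provided array and returned:
+
+ >>> a = np.ones((2, 2))
+ >>> b = np.ones((2, 2))
+ >>> out = np.empty((2, 2))
+ >>> np.dot(a, b, out=out)
+ array([[2., 2.],
+ [2., 2.]])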
+
+ """
+ return (a, b, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
+def vdot(a, b):
+ r"""
+ vdot(a, b, /)
+
+ Return the dot product of two vectors.
+
+ The `vdot` function handles complex numbers differently than `dot`:
+ if the first argument is complex, it is replaced by its complex conjugate
+ in the dot product calculation. `vdot` also handles multidimensional
+ arrays differently than `dot`: it does not perform a matrix product, but
+ flattens the arguments to 1-D arrays before taking a vector dot product.
+
+ Consequently, when the arguments are 2-D arrays of the same shape, this
+ function effectively returns their
+ `Frobenius inner product <https://en.wikipedia.org/wiki/Frobenius_inner_product>`_
+ (also known as the *trace inner product* or the *standard inner product*
+ on a vector space of matrices).
+
+ Parameters
+ ----------
+ a : array_like
+ If `a` is complex the complex conjugate is taken before calculation
+ of the dot product.
+ b : array_like
+ Second argument to the dot product.
+
+ Returns
+ -------
+ output : ndarray
+ Dot product of `a` and `b`. Can be an int, float, or
+ complex depending on the types of `a` and `b`.
+
+ See Also
+ --------
+ dot : Return the dot product without using the complex conjugate of the
+ first argument.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1+2j,3+4j])
+ >>> b = np.array([5+6j,7+8j])
+ >>> np.vdot(a, b)
+ (70-8j)
+ >>> np.vdot(b, a)
+ (70+8j)
+
+ Note that higher-dimensional arrays are flattened!
+
+ >>> a = np.array([[1, 4], [5, 6]])
+ >>> b = np.array([[4, 1], [2, 2]])
+ >>> np.vdot(a, b)
+ 30
+ >>> np.vdot(b, a)
+ 30
+ >>> 1*4 + 4*1 + 5*2 + 6*2
+ 30
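+
+ For same-shaped 2-D arrays this agrees with the Frobenius inner product
+ computed via the trace:
+
+ >>> bool(np.vdot(a, b) == np.trace(a.conj().T @ b))
+ True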
+
+ """ # noqa: E501
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
+def bincount(x, weights=None, minlength=None):
+ """
+ bincount(x, /, weights=None, minlength=0)
+
+ Count number of occurrences of each value in array of non-negative ints.
+
+ The number of bins (of size 1) is one larger than the largest value in
+ `x`. If `minlength` is specified, there will be at least this number
+ of bins in the output array (though it will be longer if necessary,
+ depending on the contents of `x`).
+ Each bin gives the number of occurrences of its index value in `x`.
+ If `weights` is specified the input array is weighted by it, i.e. if a
+ value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+ of ``out[n] += 1``.
+
+ Parameters
+ ----------
+ x : array_like, 1 dimension, nonnegative ints
+ Input array.
+ weights : array_like, optional
+ Weights, array of the same shape as `x`.
+ minlength : int, optional
+ A minimum number of bins for the output array.
+
+ Returns
+ -------
+ out : ndarray of ints
+ The result of binning the input array.
+ The length of `out` is ``max(np.amax(x)+1, minlength)``.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or contains elements with negative
+ values, or if `minlength` is negative.
+ TypeError
+ If the type of the input is float or complex.
+
+ See Also
+ --------
+ histogram, digitize, unique
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.bincount(np.arange(5))
+ array([1, 1, 1, 1, 1])
+ >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+ array([1, 3, 1, 1, 0, 0, 0, 1])
+
+ >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+ >>> np.bincount(x).size == np.amax(x)+1
+ True
+
+ The input array needs to be of integer dtype, otherwise a
+ TypeError is raised:
+
+ >>> np.bincount(np.arange(5, dtype=float))
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
+ according to the rule 'safe'
+
+ A possible use of ``bincount`` is to perform sums over
+ variable-size chunks of an array, using the ``weights`` keyword.
+
+ >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+ >>> x = np.array([0, 1, 1, 2, 2, 2])
+ >>> np.bincount(x, weights=w)
+ array([ 0.3, 0.7, 1.1])
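+
+ ``minlength`` guarantees a minimum output size, padding with empty bins:
+
+ >>> np.bincount(np.array([0, 1, 1]), minlength=5)
+ array([1, 2, 0, 0, 0])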
+
+ """
+ return (x, weights)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
+def ravel_multi_index(multi_index, dims, mode=None, order=None):
+ """
+ ravel_multi_index(multi_index, dims, mode='raise', order='C')
+
+ Converts a tuple of index arrays into an array of flat
+ indices, applying boundary modes to the multi-index.
+
+ Parameters
+ ----------
+ multi_index : tuple of array_like
+ A tuple of integer arrays, one array for each dimension.
+ dims : tuple of ints
+ The shape of array into which the indices from ``multi_index`` apply.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices are handled. Can specify
+ either one mode or a tuple of modes, one mode per index.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ In 'clip' mode, a negative index which would normally
+ wrap will clip to 0 instead.
+ order : {'C', 'F'}, optional
+ Determines whether the multi-index should be viewed as
+ indexing in row-major (C-style) or column-major
+ (Fortran-style) order.
+
+ Returns
+ -------
+ raveled_indices : ndarray
+ An array of indices into the flattened version of an array
+ of dimensions ``dims``.
+
+ See Also
+ --------
+ unravel_index
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> arr = np.array([[3,6,6],[4,5,1]])
+ >>> np.ravel_multi_index(arr, (7,6))
+ array([22, 41, 37])
+ >>> np.ravel_multi_index(arr, (7,6), order='F')
+ array([31, 41, 13])
+ >>> np.ravel_multi_index(arr, (4,6), mode='clip')
+ array([22, 23, 19])
+ >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
+ array([12, 13, 13])
+
+ >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
+ 1621
+ """
+ return multi_index
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
+def unravel_index(indices, shape=None, order=None):
+ """
+ unravel_index(indices, shape, order='C')
+
+ Converts a flat index or array of flat indices into a tuple
+ of coordinate arrays.
+
+ Parameters
+ ----------
+ indices : array_like
+ An integer array whose elements are indices into the flattened
+ version of an array of dimensions ``shape``. Before version 1.6.0,
+ this function accepted just one index value.
+ shape : tuple of ints
+ The shape of the array to use for unraveling ``indices``.
+ order : {'C', 'F'}, optional
+ Determines whether the indices should be viewed as indexing in
+ row-major (C-style) or column-major (Fortran-style) order.
+
+ Returns
+ -------
+ unraveled_coords : tuple of ndarray
+ Each array in the tuple has the same shape as the ``indices``
+ array.
+
+ See Also
+ --------
+ ravel_multi_index
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.unravel_index([22, 41, 37], (7,6))
+ (array([3, 6, 6]), array([4, 5, 1]))
+ >>> np.unravel_index([31, 41, 13], (7,6), order='F')
+ (array([3, 6, 6]), array([4, 5, 1]))
+
+ >>> np.unravel_index(1621, (6,7,8,9))
+ (3, 1, 4, 1)
+
+ """
+ return (indices,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
+def copyto(dst, src, casting=None, where=None):
+ """
+ copyto(dst, src, casting='same_kind', where=True)
+
+ Copies values from one array to another, broadcasting as necessary.
+
+ Raises a TypeError if the `casting` rule is violated, and if
+ `where` is provided, it selects which elements to copy.
+
+ Parameters
+ ----------
+ dst : ndarray
+ The array into which values are copied.
+ src : array_like
+ The array from which values are copied.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when copying.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `dst`, and selects elements to copy from `src` to `dst`
+ wherever it contains the value True.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> A = np.array([4, 5, 6])
+ >>> B = [1, 2, 3]
+ >>> np.copyto(A, B)
+ >>> A
+ array([1, 2, 3])
+
+ >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> B = [[4, 5, 6], [7, 8, 9]]
+ >>> np.copyto(A, B)
+ >>> A
+ array([[4, 5, 6],
+ [7, 8, 9]])
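+
+ The ``where`` argument limits the copy to selected positions, with
+ `src` broadcast as needed:
+
+ >>> A = np.array([1, 2, 3, 4])
+ >>> np.copyto(A, 0, where=[True, False, True, False])
+ >>> A
+ array([0, 2, 0, 4])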
+
+ """
+ return (dst, src, where)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
+def putmask(a, /, mask, values):
+ """
+ putmask(a, mask, values)
+
+ Changes elements of an array based on conditional and input values.
+
+ Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
+
+ If `values` is not the same size as `a` and `mask` then it will repeat.
+ This gives behavior different from ``a[mask] = values``.
+
+ Parameters
+ ----------
+ a : ndarray
+ Target array.
+ mask : array_like
+ Boolean mask array. It has to be the same shape as `a`.
+ values : array_like
+ Values to put into `a` where `mask` is True. If `values` is smaller
+ than `a` it will be repeated.
+
+ See Also
+ --------
+ place, put, take, copyto
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> np.putmask(x, x>2, x**2)
+ >>> x
+ array([[ 0, 1, 2],
+ [ 9, 16, 25]])
+
+ If `values` is smaller than `a` it is repeated:
+
+ >>> x = np.arange(5)
+ >>> np.putmask(x, x>1, [-33, -44])
+ >>> x
+ array([ 0, 1, -33, -44, -33])
+
+ """
+ return (a, mask, values)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
+def packbits(a, axis=None, bitorder='big'):
+ """
+ packbits(a, /, axis=None, bitorder='big')
+
+ Packs the elements of a binary-valued array into bits in a uint8 array.
+
+ The result is padded to full bytes by inserting zero bits at the end.
+
+ Parameters
+ ----------
+ a : array_like
+ An array of integers or booleans whose elements should be packed to
+ bits.
+ axis : int, optional
+ The dimension over which bit-packing is done.
+ ``None`` implies packing the flattened array.
+ bitorder : {'big', 'little'}, optional
+ The order of the input bits. 'big' will mimic bin(val),
+ ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
+ reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
+ Defaults to 'big'.
+
+ Returns
+ -------
+ packed : ndarray
+ Array of type uint8 whose elements represent bits corresponding to the
+ logical (0 or nonzero) value of the input elements. The shape of
+ `packed` has the same number of dimensions as the input (unless `axis`
+ is None, in which case the output is 1-D).
+
+ See Also
+ --------
+ unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+ array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[[1,0,1],
+ ... [0,1,0]],
+ ... [[1,1,0],
+ ... [0,0,1]]])
+ >>> b = np.packbits(a, axis=-1)
+ >>> b
+ array([[[160],
+ [ 64]],
+ [[192],
+ [ 32]]], dtype=uint8)
+
+ Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+ and 32 = 0010 0000.
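+
+ Packing the flattened array pads the last byte with zero bits:
+
+ >>> np.packbits([1, 1, 0])
+ array([192], dtype=uint8)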
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
+def unpackbits(a, axis=None, count=None, bitorder='big'):
+ """
+ unpackbits(a, /, axis=None, count=None, bitorder='big')
+
+ Unpacks elements of a uint8 array into a binary-valued output array.
+
+ Each element of `a` represents a bit-field that should be unpacked
+ into a binary-valued output array. The shape of the output array is
+ either 1-D (if `axis` is ``None``) or the same shape as the input
+ array with unpacking done along the axis specified.
+
+ Parameters
+ ----------
+ a : ndarray, uint8 type
+ Input array.
+ axis : int, optional
+ The dimension over which bit-unpacking is done.
+ ``None`` implies unpacking the flattened array.
+ count : int or None, optional
+ The number of elements to unpack along `axis`, provided as a way
+ of undoing the effect of packing a size that is not a multiple
+ of eight. A non-negative number means to only unpack `count`
+ bits. A negative number means to trim off that many bits from
+ the end. ``None`` means to unpack the entire array (the
+ default). Counts larger than the available number of bits will
+ add zero padding to the output. Negative counts must not
+ exceed the available number of bits.
+ bitorder : {'big', 'little'}, optional
+ The order of the returned bits. 'big' will mimic bin(val),
+ ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
+ the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
+ Defaults to 'big'.
+
+ Returns
+ -------
+ unpacked : ndarray, uint8 type
+ The elements are binary-valued (0 or 1).
+
+ See Also
+ --------
+ packbits : Packs the elements of a binary-valued array into bits in
+ a uint8 array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
+ >>> a
+ array([[ 2],
+ [ 7],
+ [23]], dtype=uint8)
+ >>> b = np.unpackbits(a, axis=1)
+ >>> b
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
+ >>> c = np.unpackbits(a, axis=1, count=-3)
+ >>> c
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0]], dtype=uint8)
+
+ >>> p = np.packbits(b, axis=0)
+ >>> np.unpackbits(p, axis=0)
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
+ >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
+ True
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
+def shares_memory(a, b, max_work=None):
+ """
+ shares_memory(a, b, /, max_work=None)
+
+ Determine if two arrays share memory.
+
+ .. warning::
+
+ This function can be exponentially slow for some inputs, unless
+ `max_work` is set to zero or a positive integer.
+ If in doubt, use `numpy.may_share_memory` instead.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem (maximum number
+ of candidate solutions to consider). The following special
+ values are recognized:
+
+ max_work=-1 (default)
+ The problem is solved exactly. In this case, the function returns
+ True only if there is an element shared between the arrays. Finding
+ the exact solution may take extremely long in some cases.
+ max_work=0
+ Only the memory bounds of a and b are checked.
+ This is equivalent to using ``may_share_memory()``.
+
+ Raises
+ ------
+ numpy.exceptions.TooHardError
+ Exceeded max_work.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ may_share_memory
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3, 4])
+ >>> np.shares_memory(x, np.array([5, 6, 7]))
+ False
+ >>> np.shares_memory(x[::2], x)
+ True
+ >>> np.shares_memory(x[::2], x[1::2])
+ False
+
+ Checking whether two arrays share memory is NP-complete, and
+ runtime may increase exponentially in the number of
+ dimensions. Hence, `max_work` should generally be set to a finite
+ number, as it is possible to construct examples that take
+ extremely long to run:
+
+ >>> from numpy.lib.stride_tricks import as_strided
+ >>> x = np.zeros([192163377], dtype=np.int8)
+ >>> x1 = as_strided(
+ ... x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
+ >>> x2 = as_strided(
+ ... x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
+ >>> np.shares_memory(x1, x2, max_work=1000)
+ Traceback (most recent call last):
+ ...
+ numpy.exceptions.TooHardError: Exceeded max_work
+
+ Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
+ around 1 minute for this case. It is possible to find problems
+ that take significantly longer still.
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
+def may_share_memory(a, b, max_work=None):
+ """
+ may_share_memory(a, b, /, max_work=None)
+
+ Determine if two arrays might share memory.
+
+ A return of True does not necessarily mean that the two arrays
+ share any element. It just means that they *might*.
+
+ Only the memory bounds of a and b are checked by default.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem. See
+ `shares_memory` for details. Default for ``may_share_memory``
+ is to do a bounds check.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ shares_memory
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+ >>> x = np.zeros([3, 4])
+ >>> np.may_share_memory(x[:,0], x[:,1])
+ True
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
+def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
+ """
+ is_busday(
+ dates,
+ weekmask='1111100',
+ holidays=None,
+ busdaycal=None,
+ out=None
+ )
+
+ Calculates which of the given dates are valid days, and which are not.
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of bool, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of bool
+ An array with the same shape as ``dates``, containing True for
+ each valid day, and False for each invalid day.
+
+ See Also
+ --------
+ busdaycalendar : An object that specifies a custom set of valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> # The weekdays are Friday, Saturday, and Monday
+ ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
+ ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+ array([False, False, True])
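+
+ The abbreviated-weekday form of ``weekmask`` is equivalent to the
+ default '1111100' mask:
+
+ >>> np.is_busday(['2011-07-01', '2011-07-02'],
+ ... weekmask='Mon Tue Wed Thu Fri')
+ array([ True, False])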
+ """
+ return (dates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
+def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_offset(
+ dates,
+ offsets,
+ roll='raise',
+ weekmask='1111100',
+ holidays=None,
+ busdaycal=None,
+ out=None
+ )
+
+ First adjusts the date to fall on a valid day according to
+ the ``roll`` rule, then applies offsets to the given dates
+ counted in valid days.
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ offsets : array_like of int
+ The array of offsets, which is broadcast with ``dates``.
+ roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', \
+ 'modifiedfollowing', 'modifiedpreceding'}, optional
+ How to treat dates that do not fall on a valid day. The default
+ is 'raise'.
+
+ * 'raise' means to raise an exception for an invalid day.
+ * 'nat' means to return a NaT (not-a-time) for an invalid day.
+ * 'forward' and 'following' mean to take the first valid day
+ later in time.
+ * 'backward' and 'preceding' mean to take the first valid day
+ earlier in time.
+ * 'modifiedfollowing' means to take the first valid day
+ later in time unless it is across a Month boundary, in which
+ case to take the first valid day earlier in time.
+ * 'modifiedpreceding' means to take the first valid day
+ earlier in time unless it is across a Month boundary, in which
+ case to take the first valid day later in time.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of datetime64[D], optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of datetime64[D]
+ An array with a shape from broadcasting ``dates`` and ``offsets``
+ together, containing the dates with offsets applied.
+
+ See Also
+ --------
+ busdaycalendar : An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> # First business day in October 2011 (not accounting for holidays)
+ ... np.busday_offset('2011-10', 0, roll='forward')
+ np.datetime64('2011-10-03')
+ >>> # Last business day in February 2012 (not accounting for holidays)
+ ... np.busday_offset('2012-03', -1, roll='forward')
+ np.datetime64('2012-02-29')
+ >>> # Third Wednesday in January 2011
+ ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
+ np.datetime64('2011-01-19')
+ >>> # 2012 Mother's Day in Canada and the U.S.
+ ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
+ np.datetime64('2012-05-13')
+
+ >>> # First business day on or after a date
+ ... np.busday_offset('2011-03-20', 0, roll='forward')
+ np.datetime64('2011-03-21')
+ >>> np.busday_offset('2011-03-22', 0, roll='forward')
+ np.datetime64('2011-03-22')
+ >>> # First business day after a date
+ ... np.busday_offset('2011-03-20', 1, roll='backward')
+ np.datetime64('2011-03-21')
+ >>> np.busday_offset('2011-03-22', 1, roll='backward')
+ np.datetime64('2011-03-23')
+ """
+ return (dates, offsets, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
+def busday_count(begindates, enddates, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_count(
+ begindates,
+ enddates,
+ weekmask='1111100',
+ holidays=[],
+ busdaycal=None,
+ out=None
+ )
+
+ Counts the number of valid days between `begindates` and
+ `enddates`, not including the day of `enddates`.
+
+ If ``enddates`` specifies a date value that is earlier than the
+ corresponding ``begindates`` date value, the count will be negative.
+
+ Parameters
+ ----------
+ begindates : array_like of datetime64[D]
+ The array of the first dates for counting.
+ enddates : array_like of datetime64[D]
+ The array of the end dates for counting, which are excluded
+ from the count themselves.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of int, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of int
+ An array with a shape from broadcasting ``begindates`` and ``enddates``
+ together, containing the number of valid days between
+ the begin and end dates.
+
+ See Also
+ --------
+ busdaycalendar : An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> # Number of weekdays in January 2011
+ ... np.busday_count('2011-01', '2011-02')
+ 21
+ >>> # Number of weekdays in 2011
+ >>> np.busday_count('2011', '2012')
+ 260
+ >>> # Number of Saturdays in 2011
+ ... np.busday_count('2011', '2012', weekmask='Sat')
+ 53
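+ >>> # Reversing the dates gives a negative count
+ ... np.busday_count('2011-02', '2011-01')
+ -21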
+ """
+ return (begindates, enddates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(
+ _multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+ """
+ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
+
+ Convert an array of datetimes into an array of strings.
+
+ Parameters
+ ----------
+ arr : array_like of datetime64
+ The array of UTC timestamps to format.
+ unit : str
+ One of None, 'auto', or
+ a :ref:`datetime unit <arrays.dtypes.dateunits>`.
+ timezone : {'naive', 'UTC', 'local'} or tzinfo
+ Timezone information to use when displaying the datetime. If 'UTC',
+ end with a Z to indicate UTC time. If 'local', convert to the local
+ timezone first, and suffix with a +-#### timezone offset. If a tzinfo
+ object, then do as with 'local', but use the specified timezone.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
+ Casting to allow when changing between datetime units.
+
+ Returns
+ -------
+ str_arr : ndarray
+ An array of strings the same shape as `arr`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import pytz
+ >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
+ >>> d
+ array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
+ '2002-10-27T07:30'], dtype='datetime64[m]')
+
+ Setting the timezone to UTC shows the same information, but with a Z suffix
+
+ >>> np.datetime_as_string(d, timezone='UTC')
+ array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
+ '2002-10-27T07:30Z'], dtype='<U35')
+
+ Note that we picked datetimes during a daylight-saving-time boundary,
+ so the UTC to US/Eastern conversion is not a fixed offset:
+
+ >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+ array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
+ '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
+
+ Passing a ``unit`` changes the precision of the output:
+
+ >>> np.datetime_as_string(d, unit='h')
+ array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
+ dtype='<U32')
+ >>> np.datetime_as_string(d, unit='s')
+ array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
+ '2002-10-27T07:30:00'], dtype='<U38')