Diffstat (limited to 'doc')
 -rw-r--r--  doc/cdoc/Doxyfile                                    |  29
 -rw-r--r--  doc/cdoc/Makefile                                    |  10
 -rw-r--r--  doc/cdoc/README                                      |  31
 -rwxr-xr-x  doc/cdoc/numpyfilter.py                              | 104
 -rw-r--r--  doc/release/upcoming_changes/19687.change.rst        |   8
 -rw-r--r--  doc/release/upcoming_changes/19805.new_feature.rst   |   5
 -rw-r--r--  doc/release/upcoming_changes/19921.deprecation.rst   |   3
 -rw-r--r--  doc/source/f2py/index.rst                            |   2
 -rw-r--r--  doc/source/reference/c-api/types-and-structures.rst  |   4
 -rw-r--r--  doc/source/user/basics.rec.rst                       |   2
 -rw-r--r--  doc/source/user/building.rst                         |   3
 -rw-r--r--  doc/source/user/c-info.python-as-glue.rst            |  87
12 files changed, 66 insertions, 222 deletions
diff --git a/doc/cdoc/Doxyfile b/doc/cdoc/Doxyfile
deleted file mode 100644
index c9c386e4e..000000000
--- a/doc/cdoc/Doxyfile
+++ /dev/null
@@ -1,29 +0,0 @@
-# Doxyfile for NumPy C API
-# See http://www.doxygen.nl/manual/config.html
-PROJECT_NAME = numpy
-PROJECT_NUMBER = 2.0.0
-OUTPUT_DIRECTORY = build
-STRIP_FROM_PATH = ../../numpy/core
-INHERIT_DOCS = YES
-TAB_SIZE = 8
-OPTIMIZE_OUTPUT_FOR_C = YES
-EXTRACT_ALL = YES
-EXTRACT_PRIVATE = YES
-EXTRACT_STATIC = YES
-CASE_SENSE_NAMES = NO
-INPUT = ../../numpy/core/src \
- ../../numpy/core/include
-FILE_PATTERNS = *.h *.c *.src
-RECURSIVE = YES
-INPUT_FILTER = ./numpyfilter.py
-REFERENCED_BY_RELATION = YES
-REFERENCES_RELATION = YES
-ALPHABETICAL_INDEX = NO
-GENERATE_HTML = YES
-HTML_TIMESTAMP = YES
-GENERATE_TREEVIEW = YES
-SEARCHENGINE = NO
-GENERATE_LATEX = NO
-PAPER_TYPE = a4wide
-GENERATE_XML = NO
-HAVE_DOT = NO
diff --git a/doc/cdoc/Makefile b/doc/cdoc/Makefile
deleted file mode 100644
index 8b9deada8..000000000
--- a/doc/cdoc/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-all: build
-
-build:
- doxygen
-
-clean:
- rm -rf build
-
-.PHONY: all build clean
-
diff --git a/doc/cdoc/README b/doc/cdoc/README
deleted file mode 100644
index a5363cfa1..000000000
--- a/doc/cdoc/README
+++ /dev/null
@@ -1,31 +0,0 @@
-cdoc
-====
-
-This is a simple Doxygen project for building NumPy C code documentation,
-with docstrings extracted from the C sources themselves.
-
-The understood syntax for documentation in the C source is
-
- /*
- * Some text in reStructuredText format
- */
- int function_to_which_the_text_applies()
- {
- ...
- }
-
- /*
- * More text in reStructuredText format
- */
- struct
- {
- int variable_1; /* Documentation for variable_1 */
-
- /*
- * Documentation for variable_2
- */
- int variable_2;
- } struct_name_t;
-
-Please do not use JavaDoc or Doxygen-specific formatting at the moment.
-
diff --git a/doc/cdoc/numpyfilter.py b/doc/cdoc/numpyfilter.py
deleted file mode 100755
index d3cfe18f0..000000000
--- a/doc/cdoc/numpyfilter.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python3
-"""
-numpyfilter.py [-h] inputfile
-
-Interpret C comments as ReStructuredText, and replace them by the HTML output.
-Also, add Doxygen /** and /**< syntax automatically where appropriate.
-
-"""
-import sys
-import re
-import os
-import textwrap
-
-from numpy.compat import pickle
-
-CACHE_FILE = 'build/rst-cache.pck'
-
-def main():
- import argparse
-
- parser = argparse.ArgumentParser(usage=__doc__.strip())
- parser.add_argument('input_file', help='input file')
- args = parser.parse_args()
-
- comment_re = re.compile(r'(\n.*?)/\*(.*?)\*/', re.S)
-
- cache = load_cache()
-
- try:
- with open(args.input_file, 'r') as f:
- text = f.read()
- text = comment_re.sub(lambda m: process_match(m, cache), text)
- sys.stdout.write(text)
- finally:
- save_cache(cache)
-
-def filter_comment(text):
- if text.startswith('NUMPY_API'):
- text = text[9:].strip()
- if text.startswith('UFUNC_API'):
- text = text[9:].strip()
-
- html = render_html(text)
- return html
-
-def process_match(m, cache=None):
- pre, rawtext = m.groups()
-
- preline = pre.split("\n")[-1]
-
- if cache is not None and rawtext in cache:
- text = cache[rawtext]
- else:
- text = re.compile(r'^\s*\*', re.M).sub('', rawtext)
- text = textwrap.dedent(text)
- text = filter_comment(text)
-
- if cache is not None:
- cache[rawtext] = text
-
- if preline.strip():
- return pre + "/**< " + text + " */"
- else:
- return pre + "/** " + text + " */"
-
-def load_cache():
- if os.path.exists(CACHE_FILE):
- with open(CACHE_FILE, 'rb') as f:
- try:
- cache = pickle.load(f)
- except Exception:
- cache = {}
- else:
- cache = {}
- return cache
-
-def save_cache(cache):
- with open(CACHE_FILE + '.new', 'wb') as f:
- pickle.dump(cache, f)
- os.rename(CACHE_FILE + '.new', CACHE_FILE)
-
-def render_html(text):
- import docutils.parsers.rst
- import docutils.writers.html4css1
- import docutils.core
-
- docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'title-reference'
- writer = docutils.writers.html4css1.Writer()
- parts = docutils.core.publish_parts(
- text,
- writer=writer,
- settings_overrides = dict(halt_level=5,
- traceback=True,
- default_reference_context='title-reference',
- stylesheet_path='',
- # security settings:
- raw_enabled=0,
- file_insertion_enabled=0,
- _disable_config=1,
- )
- )
- return parts['html_body']
-
-if __name__ == "__main__": main()
diff --git a/doc/release/upcoming_changes/19687.change.rst b/doc/release/upcoming_changes/19687.change.rst
new file mode 100644
index 000000000..c7f7512b6
--- /dev/null
+++ b/doc/release/upcoming_changes/19687.change.rst
@@ -0,0 +1,8 @@
+str/repr of complex dtypes now include space after punctuation
+--------------------------------------------------------------
+
+The repr of ``np.dtype({"names": ["a"], "formats": [int], "offsets": [2]})`` is
+now ``dtype({'names': ['a'], 'formats': ['<i8'], 'offsets': [2], 'itemsize': 10})``,
+whereas spaces were previously omitted after colons and between fields.
+
+The old behavior can be restored via ``np.set_printoptions(legacy="1.21")``.
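As a minimal sketch of the change described above (the ``'<i8'`` format and the itemsize of 10 assume a platform whose default integer is 64-bit; the commented output is approximate):

.. code-block:: python

   import numpy as np

   dt = np.dtype({"names": ["a"], "formats": [int], "offsets": [2]})
   print(repr(dt))
   # dtype({'names': ['a'], 'formats': ['<i8'], 'offsets': [2], 'itemsize': 10})

   # Restore the pre-1.22 spacing if downstream code depends on it:
   np.set_printoptions(legacy="1.21")
   print(repr(dt))
   # dtype({'names':['a'], 'formats':['<i8'], 'offsets':[2], 'itemsize':10})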
diff --git a/doc/release/upcoming_changes/19805.new_feature.rst b/doc/release/upcoming_changes/19805.new_feature.rst
new file mode 100644
index 000000000..f59409254
--- /dev/null
+++ b/doc/release/upcoming_changes/19805.new_feature.rst
@@ -0,0 +1,5 @@
+Symbolic parser for Fortran dimension specifications
+----------------------------------------------------
+A new symbolic parser has been added to f2py in order to correctly parse
+dimension specifications. The parser is the basis for future improvements
+and provides compatibility with Draft Fortran 202x.
diff --git a/doc/release/upcoming_changes/19921.deprecation.rst b/doc/release/upcoming_changes/19921.deprecation.rst
new file mode 100644
index 000000000..17fa0f605
--- /dev/null
+++ b/doc/release/upcoming_changes/19921.deprecation.rst
@@ -0,0 +1,3 @@
+* The misspelled keyword argument ``delimitor`` of
+  ``numpy.ma.mrecords.fromtextfile()`` has been renamed to
+  ``delimiter``; passing the old spelling will emit a deprecation warning.
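A short sketch of the renamed keyword (``example.txt`` is a hypothetical input file used only for illustration):

.. code-block:: python

   from numpy.ma.mrecords import fromtextfile

   # Correct spelling going forward:
   rec = fromtextfile("example.txt", delimiter=",")

   # The old misspelling is still accepted for now, but emits a DeprecationWarning:
   rec = fromtextfile("example.txt", delimitor=",")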
diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst
index 07d26e39e..c774a0df6 100644
--- a/doc/source/f2py/index.rst
+++ b/doc/source/f2py/index.rst
@@ -1,3 +1,5 @@
+.. _f2py:
+
=====================================
F2PY user guide and reference manual
=====================================
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 0a684d6f5..605a4ae71 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -967,8 +967,8 @@ PyUFunc_Type and PyUFuncObject
.. deprecated:: 1.22
Some fallback support for this slot exists, but will be removed
- eventually. A univiersal function which relied on this will have
- eventually have to be ported.
+ eventually. A universal function that relied on this will
+ have to be ported eventually.
See :ref:`NEP 41 <NEP41>` and :ref:`NEP 43 <NEP43>`.
.. c:member:: void *reserved2
diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst
index 0524fde8e..1e6f30506 100644
--- a/doc/source/user/basics.rec.rst
+++ b/doc/source/user/basics.rec.rst
@@ -128,7 +128,7 @@ summary they are:
... 'formats': ['i4', 'f4'],
... 'offsets': [0, 4],
... 'itemsize': 12})
- dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
+ dtype({'names': ['col1', 'col2'], 'formats': ['<i4', '<f4'], 'offsets': [0, 4], 'itemsize': 12})
Offsets may be chosen such that the fields overlap, though this will mean
that assigning to one field may clobber any overlapping field's data. As
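A small sketch of the overlap behaviour just described (the two-field layout is chosen purely for illustration):

.. code-block:: python

   import numpy as np

   # 'a' is a 4-byte integer at offset 0; 'b' is a 2-byte integer at offset 2,
   # so the two fields share bytes 2-3 of every item.
   dt = np.dtype({'names': ['a', 'b'],
                  'formats': ['<i4', '<i2'],
                  'offsets': [0, 2],
                  'itemsize': 4})
   x = np.zeros(1, dtype=dt)
   x['b'] = 1
   x['a'] = 0        # writing 'a' also rewrites the bytes backing 'b'
   print(x['b'][0])  # 0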
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index 10983ce8f..22efca4a6 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -45,6 +45,9 @@ Building NumPy requires the following software installed:
2) Compilers
+ Much of NumPy is written in C. You will need a C compiler that complies
+ with the C99 standard.
+
While a FORTRAN 77 compiler is not necessary for building NumPy, it is
needed to run the ``numpy.f2py`` tests. These tests are skipped if the
compiler is not auto-detected.
diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst
index 2798aa08a..6d514f146 100644
--- a/doc/source/user/c-info.python-as-glue.rst
+++ b/doc/source/user/c-info.python-as-glue.rst
@@ -1,6 +1,6 @@
-********************
+====================
Using Python as glue
-********************
+====================
| There is no conversation more boring than the one where everybody
| agrees.
@@ -124,9 +124,9 @@ Creating source for a basic extension module
Probably the easiest way to introduce f2py is to offer a simple
example. Here is one of the subroutines contained in a file named
-:file:`add.f`:
+:file:`add.f`
-.. code-block:: none
+.. code-block:: fortran
C
SUBROUTINE ZADD(A,B,C,N)
@@ -149,14 +149,14 @@ routine can be automatically generated by f2py::
You should be able to run this command assuming your search-path is
set-up properly. This command will produce an extension module named
-addmodule.c in the current directory. This extension module can now be
+:file:`addmodule.c` in the current directory. This extension module can now be
compiled and used from Python just like any other extension module.
Creating a compiled extension module
------------------------------------
-You can also get f2py to compile add.f and also compile its produced
+You can also get f2py to compile :file:`add.f` together with the produced
extension module leaving only a shared-library extension file that can
be imported from Python::
@@ -211,7 +211,7 @@ interface file use the -h option::
This command leaves the file add.pyf in the current directory. The
section of this file corresponding to zadd is:
-.. code-block:: none
+.. code-block:: fortran
subroutine zadd(a,b,c,n) ! in :add:add.f
double complex dimension(*) :: a
@@ -224,7 +224,7 @@ By placing intent directives and checking code, the interface can be
cleaned up quite a bit until the Python module method is both easier
to use and more robust.
-.. code-block:: none
+.. code-block:: fortran
subroutine zadd(a,b,c,n) ! in :add:add.f
double complex dimension(n) :: a
@@ -277,9 +277,9 @@ Inserting directives in Fortran source
The nice interface can also be generated automatically by placing the
variable directives as special comments in the original Fortran code.
-Thus, if I modify the source code to contain:
+Thus, if the source code is modified to contain:
-.. code-block:: none
+.. code-block:: fortran
C
SUBROUTINE ZADD(A,B,C,N)
@@ -298,14 +298,14 @@ Thus, if I modify the source code to contain:
20 CONTINUE
END
-Then, I can compile the extension module using::
+Then, one can compile the extension module using::
f2py -c -m add add.f
The resulting signature for the function add.zadd is exactly the same
one that was created previously. If the original source code had
contained ``A(N)`` instead of ``A(*)`` and so forth with ``B`` and ``C``,
-then I could obtain (nearly) the same interface simply by placing the
+then nearly the same interface can be obtained by placing the
``INTENT(OUT) :: C`` comment line in the source code. The only difference
is that ``N`` would be an optional input that would default to the length
of ``A``.
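As a hedged sketch of using the resulting interface (assuming the extension was built from the directive-annotated ``add.f``; the printed values simply reflect ZADD adding its inputs element-wise):

.. code-block:: python

   import numpy as np
   import add  # built with: f2py -c -m add add.f

   a = np.array([1 + 2j, 3 + 4j])
   b = np.array([5 + 6j, 7 + 8j])
   c = add.zadd(a, b)  # n is optional and defaults to len(a); c is returned
   print(c)            # [ 6.+8.j 10.+12.j]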
@@ -320,7 +320,7 @@ precision floating-point numbers using a fixed averaging filter. The
advantage of using Fortran to index into multi-dimensional arrays
should be clear from this example.
-.. code-block:: none
+.. code-block::
SUBROUTINE DFILTER2D(A,B,M,N)
C
@@ -407,13 +407,12 @@ conversion of the .pyf file to a .c file is handled by `numpy.disutils`.
Conclusion
----------
-The interface definition file (.pyf) is how you can fine-tune the
-interface between Python and Fortran. There is decent documentation
-for f2py found in the numpy/f2py/docs directory where-ever NumPy is
-installed on your system (usually under site-packages). There is also
-more information on using f2py (including how to use it to wrap C
-codes) at https://scipy-cookbook.readthedocs.io under the "Interfacing
-With Other Languages" heading.
+The interface definition file (.pyf) is how you can fine-tune the interface
+between Python and Fortran. There is decent documentation for f2py at
+:ref:`f2py`. There is also more information on using f2py (including how to use
+it to wrap C code) under the `"Interfacing With Other Languages" heading of
+the SciPy Cookbook
+<https://scipy-cookbook.readthedocs.io/items/idx_interfacing_with_other_languages.html>`_.
The f2py method of linking compiled code is currently the most
sophisticated and integrated approach. It allows clean separation of
@@ -422,7 +421,7 @@ distribution of the extension module. The only draw-back is that it
requires the existence of a Fortran compiler in order for a user to
install the code. However, with the existence of the free-compilers
g77, gfortran, and g95, as well as high-quality commercial compilers,
-this restriction is not particularly onerous. In my opinion, Fortran
+this restriction is not particularly onerous. In our opinion, Fortran
is still the easiest way to write fast and clear code for scientific
computing. It handles complex numbers, and multi-dimensional indexing
in the most straightforward way. Be aware, however, that some Fortran
@@ -493,7 +492,7 @@ Complex addition in Cython
Here is part of a Cython module named ``add.pyx`` which implements the
complex addition functions we previously implemented using f2py:
-.. code-block:: none
+.. code-block:: cython
cimport cython
cimport numpy as np
@@ -546,7 +545,7 @@ Image filter in Cython
The two-dimensional example we created using Fortran is just as easy to write
in Cython:
-.. code-block:: none
+.. code-block:: cython
cimport numpy as np
import numpy as np
@@ -809,7 +808,7 @@ Calling the function
The function is accessed as an attribute of or an item from the loaded
shared-library. Thus, if ``./mylib.so`` has a function named
-``cool_function1``, I could access this function either as:
+``cool_function1``, it may be accessed either as:
.. code-block:: python
@@ -859,7 +858,7 @@ kind of array from a given input.
Complete example
----------------
-In this example, I will show how the addition function and the filter
+In this example, we will demonstrate how the addition function and the filter
function implemented previously using the other approaches can be
implemented using ctypes. First, the C code which implements the
algorithms contains the functions ``zadd``, ``dadd``, ``sadd``, ``cadd``,
@@ -1073,7 +1072,7 @@ Its disadvantages include
- It is difficult to distribute an extension module made using ctypes
because of a lack of support for building shared libraries in
- distutils (but I suspect this will change in time).
+ distutils.
- You must have shared-libraries of your code (no static libraries).
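As a sketch of the two access styles mentioned earlier in this section (``mylib.so`` and ``cool_function1`` are the illustrative names used in the surrounding text):

.. code-block:: python

   import numpy as np

   # Load ./mylib.so (the platform-specific prefix and extension are resolved for you).
   lib = np.ctypeslib.load_library("mylib", ".")

   func = lib.cool_function1     # attribute-style access
   func = lib["cool_function1"]  # item-style access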
@@ -1095,15 +1094,14 @@ Additional tools you may find useful
These tools have been found useful by others using Python and so are
included here. They are discussed separately because they are
either older ways to do things now handled by f2py, Cython, or ctypes
-(SWIG, PyFort) or because I don't know much about them (SIP, Boost).
-I have not added links to these
-methods because my experience is that you can find the most relevant
-link faster using Google or some other search engine, and any links
-provided here would be quickly dated. Do not assume that just because
-it is included in this list, I don't think the package deserves your
-attention. I'm including information about these packages because many
-people have found them useful and I'd like to give you as many options
-as possible for tackling the problem of easily integrating your code.
+(SWIG, PyFort) or because of a lack of reasonable documentation (SIP, Boost).
+Links to these methods are not included here, since the most relevant ones
+can be found using Google or some other search engine, and any links provided
+here would quickly become dated. Do not take inclusion in this list to mean
+that a package does not deserve your attention. Information about these
+packages is collected here because many people have found them useful and
+we'd like to give you as many options as possible for tackling the problem
+of easily integrating your code.
SWIG
@@ -1132,12 +1130,12 @@ to the Python-specific typemaps, SWIG can be used to interface a
library with other languages such as Perl, Tcl, and Ruby.
My experience with SWIG has been generally positive in that it is
-relatively easy to use and quite powerful. I used to use it quite
+relatively easy to use and quite powerful. It has been used
often before becoming more proficient at writing C-extensions.
-However, I struggled writing custom interfaces with SWIG because it
+However, writing custom interfaces with SWIG is often troublesome because it
must be done using the concept of typemaps which are not Python
-specific and are written in a C-like syntax. Therefore, I tend to
-prefer other gluing strategies and would only attempt to use SWIG to
+specific and are written in a C-like syntax. Therefore, other gluing strategies
+are preferred, and SWIG would probably be considered only to
wrap a very-large C/C++ library. Nonetheless, there are others who use
SWIG quite happily.
@@ -1170,12 +1168,11 @@ those libraries which provides a concise interface for binding C++
classes and functions to Python. The amazing part of the Boost.Python
approach is that it works entirely in pure C++ without introducing a
new syntax. Many users of C++ report that Boost.Python makes it
-possible to combine the best of both worlds in a seamless fashion. I
-have not used Boost.Python because I am not a big user of C++ and
-using Boost to wrap simple C-subroutines is usually over-kill. It's
-primary purpose is to make C++ classes available in Python. So, if you
-have a set of C++ classes that need to be integrated cleanly into
-Python, consider learning about and using Boost.Python.
+possible to combine the best of both worlds in a seamless fashion. Using Boost
+to wrap simple C subroutines is usually overkill. Its primary purpose is to
+make C++ classes available in Python. So, if you have a set of C++ classes that
+need to be integrated cleanly into Python, consider learning about and using
+Boost.Python.
PyFort