diff --git a/build/pkgs/gnumake_tokenpool/SPKG.rst b/build/pkgs/gnumake_tokenpool/SPKG.rst
new file mode 100644
index 00000000000..a5c43be63b5
--- /dev/null
+++ b/build/pkgs/gnumake_tokenpool/SPKG.rst
@@ -0,0 +1,21 @@
+gnumake_tokenpool: Jobclient and jobserver for the GNU make tokenpool protocol
+==============================================================================
+
+Description
+-----------
+
+The project has implementations in multiple languages.
+
+We only install the implementation in Python.
+
+
+License
+-------
+
+MIT
+
+
+Upstream Contact
+----------------
+
+- https://github.com/milahu/gnumake-tokenpool (upstream)
diff --git a/build/pkgs/gnumake_tokenpool/checksums.ini b/build/pkgs/gnumake_tokenpool/checksums.ini
new file mode 100644
index 00000000000..62f631ea40a
--- /dev/null
+++ b/build/pkgs/gnumake_tokenpool/checksums.ini
@@ -0,0 +1,5 @@
+tarball=gnumake_tokenpool-VERSION-py3-none-any.whl
+sha1=a060f03e0306a85bc1a91a450e457be83ed371e9
+md5=834ccc4d6d52741c5eabac1bdb8f39b2
+cksum=1679797266
+upstream_url=https://pypi.io/packages/py3/g/gnumake_tokenpool/gnumake_tokenpool-VERSION-py3-none-any.whl
diff --git a/build/pkgs/gnumake_tokenpool/dependencies b/build/pkgs/gnumake_tokenpool/dependencies
new file mode 100644
index 00000000000..25a25c98198
--- /dev/null
+++ b/build/pkgs/gnumake_tokenpool/dependencies
@@ -0,0 +1 @@
+ | $(PYTHON) $(PYTHON_TOOLCHAIN)
diff --git a/build/pkgs/gnumake_tokenpool/install-requires.txt b/build/pkgs/gnumake_tokenpool/install-requires.txt
new file mode 100644
index 00000000000..0ee4452edd3
--- /dev/null
+++ b/build/pkgs/gnumake_tokenpool/install-requires.txt
@@ -0,0 +1 @@
+gnumake-tokenpool
diff --git a/build/pkgs/gnumake_tokenpool/package-version.txt b/build/pkgs/gnumake_tokenpool/package-version.txt
new file mode 100644
index 00000000000..bcab45af15a
--- /dev/null
+++ b/build/pkgs/gnumake_tokenpool/package-version.txt
@@ -0,0 +1 @@
+0.0.3
diff --git a/build/pkgs/gnumake_tokenpool/type b/build/pkgs/gnumake_tokenpool/type
new file mode 100644
index 00000000000..a6a7b9cd726
--- /dev/null
+++ b/build/pkgs/gnumake_tokenpool/type
@@ -0,0 +1 @@
+standard
diff --git a/pkgs/sagemath-categories/tox.ini b/pkgs/sagemath-categories/tox.ini
index 5469be55b3c..b4bb49d132a 100644
--- a/pkgs/sagemath-categories/tox.ini
+++ b/pkgs/sagemath-categories/tox.ini
@@ -26,6 +26,7 @@ passenv =
                             # Parallel build
                             SAGE_NUM_THREADS
                             SAGE_NUM_THREADS_PARALLEL
+                            MAKEFLAGS
                             # SAGE_VENV only for referring to the basepython or finding the wheels
     sagepython, sagewheels: SAGE_VENV
                             # Location of the wheels
diff --git a/pkgs/sagemath-environment/tox.ini b/pkgs/sagemath-environment/tox.ini
index ff2eef44f66..6bf1f2a6ebb 100644
--- a/pkgs/sagemath-environment/tox.ini
+++ b/pkgs/sagemath-environment/tox.ini
@@ -26,6 +26,7 @@ passenv =
                             # Parallel build
                             SAGE_NUM_THREADS
                             SAGE_NUM_THREADS_PARALLEL
+                            MAKEFLAGS
                             # SAGE_VENV only for referring to the basepython or finding the wheels
     sagepython, sagewheels: SAGE_VENV
                             # Location of the wheels
diff --git a/pkgs/sagemath-objects/tox.ini b/pkgs/sagemath-objects/tox.ini
index 0ca92ea9570..a8f5a6d6a76 100644
--- a/pkgs/sagemath-objects/tox.ini
+++ b/pkgs/sagemath-objects/tox.ini
@@ -26,6 +26,7 @@ passenv =
                             # Parallel build
                             SAGE_NUM_THREADS
                             SAGE_NUM_THREADS_PARALLEL
+                            MAKEFLAGS
                             # SAGE_VENV only for referring to the basepython or finding the wheels
     sagepython, sagewheels: SAGE_VENV
                             # Location of the wheels
diff --git a/pkgs/sagemath-repl/tox.ini b/pkgs/sagemath-repl/tox.ini
index 6944df002f5..679153a2947 100644
--- a/pkgs/sagemath-repl/tox.ini
+++ b/pkgs/sagemath-repl/tox.ini
@@ -26,6 +26,7 @@ passenv =
                             # Parallel build
                             SAGE_NUM_THREADS
                             SAGE_NUM_THREADS_PARALLEL
+                            MAKEFLAGS
                             # SAGE_VENV only for referring to the basepython or finding the wheels
     sagepython, sagewheels: SAGE_VENV
                             # Location of the wheels
diff --git a/pkgs/sagemath-standard/tox.ini b/pkgs/sagemath-standard/tox.ini
index c14f97f8bbf..6aae1ef1bfa 100644
--- a/pkgs/sagemath-standard/tox.ini
+++ b/pkgs/sagemath-standard/tox.ini
@@ -75,6 +75,7 @@ passenv =
                             # Parallel build
                             SAGE_NUM_THREADS
                             SAGE_NUM_THREADS_PARALLEL
+                            MAKEFLAGS
                             # SAGE_VENV only for referring to the basepython or finding the wheels
     sagepython, sagewheels: SAGE_VENV
                             # Location of the wheels
diff --git a/src/bin/sage-runtests b/src/bin/sage-runtests
index 01b7ca44868..e25cebb48a8 100755
--- a/src/bin/sage-runtests
+++ b/src/bin/sage-runtests
@@ -26,7 +26,8 @@ if __name__ == "__main__":
                         ".py, .pyx, .pxd, .pxi, .sage, .spyx, .tex, .rst.")
     parser.add_argument("-p", "--nthreads", dest="nthreads", type=int, nargs='?', const=0, default=1, metavar="N",
-                        help="tests in parallel using N threads with 0 interpreted as max(2, min(8, cpu_count()))")
+                        help="test in parallel using N threads, with 0 interpreted as max(2, min(8, cpu_count())); "
+                             "when run under the control of the GNU make jobserver (make -j), request at most N job slots")
     parser.add_argument("-T", "--timeout", type=int, default=-1, help="timeout (in seconds) for doctesting one file, 0 for no timeout")
     what = parser.add_mutually_exclusive_group()
     what.add_argument("-a", "--all", action="store_true", default=False, help="test all files in the Sage library")
diff --git a/src/doc/en/developer/doctesting.rst b/src/doc/en/developer/doctesting.rst
index 589c9260134..f7c54bbbeec 100644
--- a/src/doc/en/developer/doctesting.rst
+++ b/src/doc/en/developer/doctesting.rst
@@ -512,7 +512,10 @@ doctests.
 This determines the number of threads by reading the environment
 variable :envvar:`MAKE`: if it is set to ``make -j12``, then use 12
 threads. If :envvar:`MAKE` is not set, then by default it uses the number of
 CPU cores (as determined by the Python function
-``multiprocessing.cpu_count()``) with a minimum of 2 and a maximum of 8.
+:func:`multiprocessing.cpu_count`) with a minimum of 2 and a maximum of 8.
+(When this runs under the control of the `GNU make jobserver
+`_, then Sage
+will request at most this number of job slots.)

 In any case, this will test the Sage library with multiple threads::
diff --git a/src/doc/en/installation/source.rst b/src/doc/en/installation/source.rst
index b16fd8e7295..485d4916ec8 100644
--- a/src/doc/en/installation/source.rst
+++ b/src/doc/en/installation/source.rst
@@ -782,14 +782,21 @@ Here are some of the more commonly used variables affecting the build process:
   Some users on single-core macOS machines have reported problems when
   building Sage with ``MAKE='make -jNUM'`` with ``NUM`` greater than one.

-- :envvar:`SAGE_NUM_THREADS` - if set to a number, then when building the
-  documentation, parallel doctesting, or running ``sage -b``, use this many
-  threads.
+- :envvar:`SAGE_NUM_THREADS` - if set to a number, then when rebuilding with
+  ``sage -b`` or parallel doctesting with ``sage -t -p 0``, use at most this
+  many threads.
+
   If this is not set, then determine the number of threads using the value of
   the :envvar:`MAKE` (see above) or :envvar:`MAKEFLAGS` environment variables.
-  If none of these specifies a number of jobs, use one thread (except for
-  parallel testing: there we use a default of the number of CPU cores, with a
-  maximum of 8 and a minimum of 2).
+  If none of these specifies a number of jobs,
+
+  - ``sage -b`` only uses one thread
+
+  - ``sage -t -p 0`` uses a default of the number of CPU cores, with a
+    maximum of 8 and a minimum of 2.
+
+  When ``sage -t -p`` runs under the control of the GNU ``make``
+  jobserver, then Sage will request at most this number of job slots.

 - :envvar:`V` - if set to ``0``, silence the build. Instead of showing a
   detailed compilation log, only one line of output is shown
diff --git a/src/sage/doctest/forker.py b/src/sage/doctest/forker.py
index 05bd5d0fd8e..4315cd1ba9f 100644
--- a/src/sage/doctest/forker.py
+++ b/src/sage/doctest/forker.py
@@ -1803,6 +1803,16 @@ def parallel_dispatch(self):
         """
         opt = self.controller.options

+        job_client = None
+        try:
+            from gnumake_tokenpool import JobClient, NoJobServer
+            try:
+                job_client = JobClient()
+            except NoJobServer:
+                pass
+        except ImportError:
+            pass
+
         source_iter = iter(self.controller.sources)

         # If timeout was 0, simply set a very long time
@@ -1925,6 +1935,9 @@ def sel_exit():
                         w.copied_pid = w.pid
                         w.close()
                         finished.append(w)
+                        if job_client:
+                            job_client.release()
+
                 workers = new_workers

                 # Similarly, process finished workers.
@@ -1959,11 +1972,14 @@ def sel_exit():
                             break

                 # Start new workers if possible
-                while source_iter is not None and len(workers) < opt.nthreads:
+                while (source_iter is not None and len(workers) < opt.nthreads
+                       and (not job_client or job_client.acquire())):
                     try:
                         source = next(source_iter)
                     except StopIteration:
                         source_iter = None
+                        if job_client:
+                            job_client.release()
                     else:
                         # Start a new worker.
                         import copy
@@ -2040,6 +2056,8 @@ def sel_exit():
                     sleep(die_timeout)
                     for w in workers:
                         w.kill()
+                    if job_client:
+                        job_client.release()
                 finally:
                     os._exit(0)

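For readers unfamiliar with the tokenpool protocol, the acquire/release pattern that
the ``forker.py`` hunks above add to ``parallel_dispatch`` can be sketched in
isolation. The snippet below is a minimal sketch that uses only the names visible in
the diff (``JobClient``, ``NoJobServer``, ``acquire``, ``release``); the ``work``
function, the task count, and the use of ``multiprocessing`` are illustrative
assumptions, not part of the patch::

    import multiprocessing
    import time

    # Connect to the jobserver that a parent "make -j" advertises via MAKEFLAGS.
    # If the package is missing or no jobserver is running, job_client stays
    # None and no throttling happens -- the same fallback forker.py uses.
    try:
        from gnumake_tokenpool import JobClient, NoJobServer
        try:
            job_client = JobClient()
        except NoJobServer:
            job_client = None
    except ImportError:
        job_client = None

    def work(n):
        time.sleep(0.1)   # stand-in for doctesting one file (illustrative only)

    workers = []
    for n in range(8):
        # Start another child only if make is willing to grant a job slot,
        # mirroring the modified "while ..." condition in parallel_dispatch().
        if job_client and not job_client.acquire():
            break
        p = multiprocessing.Process(target=work, args=(n,))
        p.start()
        workers.append(p)

    for p in workers:
        p.join()
        if job_client:
            job_client.release()   # hand each slot back, as forker.py does on worker exit

Unlike ``forker.py``, which keeps polling for slots as workers finish, this sketch
simply stops submitting when no token is immediately available.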
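Separately, the default thread count that the new ``--nthreads`` help text and
``doctesting.rst`` both document for ``-p 0`` is just the clamped CPU count; it can
be checked directly (the helper name here is only for illustration)::

    import multiprocessing

    def default_nthreads():
        # "-p 0" is documented above as max(2, min(8, cpu_count()))
        return max(2, min(8, multiprocessing.cpu_count()))

    print(default_nthreads())   # 8 on a 16-core machine, 2 on a single-core one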