From 7b5f81958a9e0274d340869255bc4b3770d7729a Mon Sep 17 00:00:00 2001
From: Dave Jones
Date: Wed, 20 Feb 2019 22:52:29 +0100
Subject: [PATCH] Limit compilation parallelism

On smaller platforms like the Raspberry Pi, which have a deficit of
physical memory, the default algorithm for determining compilation
parallelism is far too optimistic and usually results in crashing the
platform. This modification limits the number of parallel jobs to the
size of RAM in Gb.

On a mid-size PC (e.g. quad-core with 8+Gb of RAM), this will make no
difference. On a Pi (quad-core with 1Gb of RAM), this will limit it to
1 job at a time (which is ideal).
---
 setup.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/setup.py b/setup.py
index 4bb958fb0..169872e23 100644
--- a/setup.py
+++ b/setup.py
@@ -22,6 +22,18 @@ def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=N
     cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
     # parallel code
     N = 2*multiprocessing.cpu_count()# number of parallel compilations
+    try:
+        # On Unix-like platforms attempt to obtain the total memory in the
+        # machine and limit the number of parallel jobs to the number of Gbs
+        # of RAM (to avoid killing smaller platforms like the Pi)
+        mem = os.sysconf('SC_PHYS_PAGES') * os.sysconf('SC_PAGE_SIZE') # bytes
+    except (AttributeError, ValueError):
+        # Couldn't query RAM; don't limit parallelism (it's probably a well
+        # equipped Windows / Mac OS X box)
+        pass
+    else:
+        mem = max(1, int(round(mem / 1024 ** 3))) # convert to Gb
+        N = min(mem, N)
     def _single_compile(obj):
         try: src, ext = build[obj]
         except KeyError: return