author     Gemma Fardell <47746591+gfardell@users.noreply.github.com>  2020-01-23 11:52:46 +0000
committer  Edoardo Pasca <edo.paskino@gmail.com>  2020-01-23 11:52:46 +0000
commit     894f35c9be404bc2c13f90f4a6184a545029181a (patch)
tree       17938025fe60afac7006b8fb6c4e7016bf687ccd /Wrappers/Python
parent     fecff8ded735d309aba43d30226c0bb51386c905 (diff)
Allows user to set number of threads used by openMP in C library grad… (#476)
* Allows user to set number of threads used by openMP in C library gradient operator. Changed to release build flags
* closes #477
* added test function for c lib thread deployment
* improved thread scaling for Neumann algorithms
* removed unnecessary thread sync
* reverts omp number of threads at the end of the c function call

Co-authored-by: Edoardo Pasca <edo.paskino@gmail.com>
Diffstat (limited to 'Wrappers/Python')
-rw-r--r--  Wrappers/Python/CMakeLists.txt                                     2
-rw-r--r--  Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py   41
2 files changed, 29 insertions, 14 deletions
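
For context, a minimal usage sketch of the new keyword (not part of the commit): the import path follows the module changed below, the geometry sizes are illustrative, and num_threads is only honoured by the 'c' backend; when it is omitted the backend falls back to max(cpu_count()/2, 1) threads.

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators.GradientOperator import Gradient

    # Illustrative 2D geometry; any ImageGeometry accepted by Gradient works.
    ig = ImageGeometry(voxel_num_x=256, voxel_num_y=256)

    # Default thread count: the C backend picks max(cpu_count()/2, 1).
    grad_default = Gradient(ig, backend='c')

    # Explicitly request 4 OpenMP threads for the finite-difference C code.
    grad_4 = Gradient(ig, backend='c', num_threads=4)
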
diff --git a/Wrappers/Python/CMakeLists.txt b/Wrappers/Python/CMakeLists.txt
index 9104afd..0c24540 100644
--- a/Wrappers/Python/CMakeLists.txt
+++ b/Wrappers/Python/CMakeLists.txt
@@ -76,7 +76,9 @@ if (BUILD_PYTHON_WRAPPER)
)
endif()
#set (PYTHON_DEST ${CMAKE_INSTALL_PREFIX}/python/)
+
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/build/lib/ccpi
+
DESTINATION ${PYTHON_DEST})
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/data/ DESTINATION ${CMAKE_INSTALL_PREFIX}/share/ccpi)
#file(TOUCH ${PYTHON_DEST}/edo/__init__.py)
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 6391cf7..a45c3d2 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -25,6 +25,11 @@ from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataCon
import numpy
import warnings
+#default nThreads
+import multiprocessing
+cpus = multiprocessing.cpu_count()
+NUM_THREADS = max(int(cpus/2),1)
+
NEUMANN = 'Neumann'
PERIODIC = 'Periodic'
C = 'c'
@@ -51,10 +56,11 @@ class Gradient(LinearOperator):
'Space' or 'SpaceChannels', defaults to 'Space'
* *backend* (``str``) --
'c' or 'numpy', defaults to 'c' if correlation is 'SpaceChannels' or channels = 1
-
-
+ * *num_threads* (``int``) --
+ If backend is 'c' specify the number of threads to use. Default is number of cpus/2
+
+
Example (2D):
-
.. math::
\nabla : X -> Y \\
@@ -85,7 +91,7 @@ class Gradient(LinearOperator):
if backend == NUMPY:
self.operator = Gradient_numpy(gm_domain, bnd_cond=bnd_cond, **kwargs)
else:
- self.operator = Gradient_C(gm_domain, bnd_cond=bnd_cond)
+ self.operator = Gradient_C(gm_domain, bnd_cond=bnd_cond, **kwargs)
def direct(self, x, out=None):
@@ -183,7 +189,7 @@ class Gradient_numpy(LinearOperator):
# Call FiniteDiff operator
self.FD = FiniteDiff(self.gm_domain, direction = 0, bnd_cond = self.bnd_cond)
-
+ print("Initialised GradientOperator with numpy backend")
def direct(self, x, out=None):
@@ -275,7 +281,8 @@ class Gradient_numpy(LinearOperator):
spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond)
res.append(spMat.sum_abs_col())
return BlockDataContainer(*res)
-
+
+
import ctypes, platform
# check for the extension
@@ -288,14 +295,13 @@ elif platform.system() == 'Darwin':
else:
raise ValueError('Not supported platform, ', platform.system())
-#print ("dll location", dll)
-cilacc = ctypes.cdll.LoadLibrary(dll)
-#FD = ctypes.CDLL(dll)
+cilacc = ctypes.cdll.LoadLibrary(dll)
c_float_p = ctypes.POINTER(ctypes.c_float)
cilacc.openMPtest.restypes = ctypes.c_int32
+cilacc.openMPtest.argtypes = [ctypes.c_int32]
cilacc.fdiff4D.argtypes = [ctypes.POINTER(ctypes.c_float),
ctypes.POINTER(ctypes.c_float),
@@ -307,6 +313,7 @@ cilacc.fdiff4D.argtypes = [ctypes.POINTER(ctypes.c_float),
ctypes.c_long,
ctypes.c_long,
ctypes.c_int32,
+ ctypes.c_int32,
ctypes.c_int32]
cilacc.fdiff3D.argtypes = [ctypes.POINTER(ctypes.c_float),
@@ -317,6 +324,7 @@ cilacc.fdiff3D.argtypes = [ctypes.POINTER(ctypes.c_float),
ctypes.c_long,
ctypes.c_long,
ctypes.c_int32,
+ ctypes.c_int32,
ctypes.c_int32]
cilacc.fdiff2D.argtypes = [ctypes.POINTER(ctypes.c_float),
@@ -325,6 +333,7 @@ cilacc.fdiff2D.argtypes = [ctypes.POINTER(ctypes.c_float),
ctypes.c_long,
ctypes.c_long,
ctypes.c_int32,
+ ctypes.c_int32,
ctypes.c_int32]
@@ -336,10 +345,12 @@ class Gradient_C(LinearOperator):
on 2D, 3D, 4D ImageData
under Neumann/Periodic boundary conditions'''
- def __init__(self, gm_domain, gm_range=None, bnd_cond = NEUMANN):
+ def __init__(self, gm_domain, gm_range=None, bnd_cond = NEUMANN, **kwargs):
super(Gradient_C, self).__init__()
+ self.num_threads = kwargs.get('num_threads',NUM_THREADS)
+
self.gm_domain = gm_domain
self.gm_range = gm_range
@@ -362,8 +373,9 @@ class Gradient_C(LinearOperator):
self.fd = cilacc.fdiff2D
else:
raise ValueError('Number of dimensions not supported, expected 2, 3 or 4, got {}'.format(len(gm_domain.shape)))
-
-
+ #self.num_threads
+ print("Initialised GradientOperator with C backend running with ", cilacc.openMPtest(self.num_threads)," threads")
+
@staticmethod
def datacontainer_as_c_pointer(x):
ndx = x.as_array()
@@ -377,9 +389,10 @@ class Gradient_C(LinearOperator):
out = self.gm_range.allocate(None)
return_val = True
+ #pass list of all arguments
arg1 = [Gradient_C.datacontainer_as_c_pointer(out.get_item(i))[1] for i in range(self.gm_range.shape[0])]
arg2 = [el for el in x.shape]
- args = arg1 + arg2 + [self.bnd_cond, 1]
+ args = arg1 + arg2 + [self.bnd_cond, 1, self.num_threads]
self.fd(x_p, *args)
if return_val is True:
@@ -397,7 +410,7 @@ class Gradient_C(LinearOperator):
arg1 = [Gradient_C.datacontainer_as_c_pointer(x.get_item(i))[1] for i in range(self.gm_range.shape[0])]
arg2 = [el for el in out.shape]
- args = arg1 + arg2 + [self.bnd_cond, 0]
+ args = arg1 + arg2 + [self.bnd_cond, 0, self.num_threads]
self.fd(out_p, *args)
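
Finally, a small hedged sketch of exercising the new openMPtest entry point directly, reusing the module-level cilacc handle created above; openMPtest(n) presumably sets the OpenMP thread count to n and returns the number of threads actually deployed, which is how Gradient_C uses it in its initialisation message:

    # Sketch only: relies on the openMPtest argtypes registered in
    # GradientOperator.py; the return semantics are inferred from the commit.
    from ccpi.optimisation.operators.GradientOperator import cilacc

    deployed = cilacc.openMPtest(4)
    print("OpenMP threads deployed:", deployed)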