author | Daniil Kazantsev <dkazanc3@googlemail.com> | 2018-12-19 15:42:38 +0000
---|---|---
committer | GitHub <noreply@github.com> | 2018-12-19 15:42:38 +0000
commit | 07fb80445f83758e4aed94a461cf1cf2b869318a (patch) |
tree | e93c03bcfbe2eb88a13cdd42edaea045f7f13c06 | /Wrappers/Python
parent | c04b85a6fdd8c63e3363c8072cbfe4b97409dc60 (diff) |
parent | ec59b600885a1c7a60e1b528f3d09588aa972609 (diff) |
download | regularization-07fb80445f83758e4aed94a461cf1cf2b869318a.tar.gz regularization-07fb80445f83758e4aed94a461cf1cf2b869318a.tar.bz2 regularization-07fb80445f83758e4aed94a461cf1cf2b869318a.tar.xz regularization-07fb80445f83758e4aed94a461cf1cf2b869318a.zip |
Merge pull request #80 from vais-ral/dev-jenkins
Dev jenkins
Diffstat (limited to 'Wrappers/Python')
mode | file | lines changed
---|---|---
-rw-r--r-- | Wrappers/Python/conda-recipe/build.sh | 2
-rw-r--r-- | Wrappers/Python/conda-recipe/meta.yaml | 2
-rwxr-xr-x | Wrappers/Python/conda-recipe/run_test.py | 67
-rw-r--r-- | Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py | 4
-rw-r--r-- | Wrappers/Python/src/gpu_regularisers.pyx | 156
5 files changed, 136 insertions, 95 deletions
diff --git a/Wrappers/Python/conda-recipe/build.sh b/Wrappers/Python/conda-recipe/build.sh
index 54bc8e2..eec7c2f 100644
--- a/Wrappers/Python/conda-recipe/build.sh
+++ b/Wrappers/Python/conda-recipe/build.sh
@@ -4,7 +4,7 @@ cp -rv "$RECIPE_DIR/../.." "$SRC_DIR/ccpi"
cp -rv "$RECIPE_DIR/../../../Core" "$SRC_DIR/Core"
cd $SRC_DIR
-
+##cuda=off
cmake -G "Unix Makefiles" $RECIPE_DIR/../../../ -DBUILD_PYTHON_WRAPPER=ON -DCONDA_BUILD=ON -DBUILD_CUDA=ON -DCMAKE_BUILD_TYPE="Release" -DLIBRARY_LIB=$CONDA_PREFIX/lib -DLIBRARY_INC=$CONDA_PREFIX -DCMAKE_INSTALL_PREFIX=$PREFIX
diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml
index ed73165..808493e 100644
--- a/Wrappers/Python/conda-recipe/meta.yaml
+++ b/Wrappers/Python/conda-recipe/meta.yaml
@@ -1,6 +1,6 @@
package:
  name: ccpi-regulariser
-  version: 0.10.2
+  version: 0.10.3
build:
diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py
index 499ae7f..cfb3f53 100755
--- a/Wrappers/Python/conda-recipe/run_test.py
+++ b/Wrappers/Python/conda-recipe/run_test.py
@@ -2,7 +2,7 @@ import unittest
import numpy as np
import os
import timeit
-from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, TGV, LLT_ROF, FGP_dTV, NDF, DIFF4th
+from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, TGV, LLT_ROF, FGP_dTV, NDF, DIFF4th
from PIL import Image
class TiffReader(object):
@@ -37,6 +37,8 @@ class TestRegularisers(unittest.TestCase):
def test_ROF_TV_CPU_vs_GPU(self):
+ #print ("tomas debug test function")
+ print(__name__)
filename = os.path.join("lena_gray_512.tif")
plt = TiffReader()
# read image
@@ -63,11 +65,11 @@ class TestRegularisers(unittest.TestCase):
# set parameters
pars = {'algorithm': ROF_TV, \
- 'input' : u0,\
- 'regularisation_parameter':0.04,\
- 'number_of_iterations': 1000,\
- 'time_marching_parameter': 0.0001
- }
+ 'input' : u0,\
+ 'regularisation_parameter':0.04,\
+ 'number_of_iterations': 2500,\
+ 'time_marching_parameter': 0.00002
+ }
print ("#############ROF TV CPU####################")
start_time = timeit.default_timer()
rof_cpu = ROF_TV(pars['input'],
@@ -88,8 +90,8 @@ class TestRegularisers(unittest.TestCase):
pars['number_of_iterations'],
pars['time_marching_parameter'],'gpu')
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
+
rms = rmse(Im, rof_gpu)
pars['rmse'] = rms
pars['algorithm'] = ROF_TV
@@ -101,10 +103,10 @@ class TestRegularisers(unittest.TestCase):
diff_im = np.zeros(np.shape(rof_cpu))
diff_im = abs(rof_cpu - rof_gpu)
diff_im[diff_im > tolerance] = 1
-
self.assertLessEqual(diff_im.sum() , 1)
def test_FGP_TV_CPU_vs_GPU(self):
+ print(__name__)
filename = os.path.join("lena_gray_512.tif")
plt = TiffReader()
# read image
@@ -169,10 +171,10 @@ class TestRegularisers(unittest.TestCase):
pars['methodTV'],
pars['nonneg'],
pars['printingOut'],'gpu')
-
+
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
+
rms = rmse(Im, fgp_gpu)
pars['rmse'] = rms
pars['algorithm'] = FGP_TV
@@ -189,6 +191,7 @@ class TestRegularisers(unittest.TestCase):
self.assertLessEqual(diff_im.sum() , 1)
def test_SB_TV_CPU_vs_GPU(self):
+ print(__name__)
filename = os.path.join("lena_gray_512.tif")
plt = TiffReader()
# read image
@@ -251,10 +254,10 @@ class TestRegularisers(unittest.TestCase):
pars['tolerance_constant'],
pars['methodTV'],
pars['printingOut'],'gpu')
-
+
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
+
rms = rmse(Im, sb_gpu)
pars['rmse'] = rms
pars['algorithm'] = SB_TV
@@ -269,6 +272,7 @@ class TestRegularisers(unittest.TestCase):
self.assertLessEqual(diff_im.sum(), 1)
def test_TGV_CPU_vs_GPU(self):
+ print(__name__)
filename = os.path.join("lena_gray_512.tif")
plt = TiffReader()
# read image
@@ -329,10 +333,10 @@ class TestRegularisers(unittest.TestCase):
pars['alpha0'],
pars['number_of_iterations'],
pars['LipshitzConstant'],'gpu')
-
+
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
+
rms = rmse(Im, tgv_gpu)
pars['rmse'] = rms
pars['algorithm'] = TGV
@@ -347,6 +351,7 @@ class TestRegularisers(unittest.TestCase):
self.assertLessEqual(diff_im.sum() , 1)
def test_LLT_ROF_CPU_vs_GPU(self):
+ print(__name__)
filename = os.path.join("lena_gray_512.tif")
plt = TiffReader()
# read image
@@ -405,8 +410,8 @@ class TestRegularisers(unittest.TestCase):
pars['time_marching_parameter'],'gpu')
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
+
rms = rmse(Im, lltrof_gpu)
pars['rmse'] = rms
pars['algorithm'] = LLT_ROF
@@ -421,6 +426,7 @@ class TestRegularisers(unittest.TestCase):
self.assertLessEqual(diff_im.sum(), 1)
def test_NDF_CPU_vs_GPU(self):
+ print(__name__)
filename = os.path.join("lena_gray_512.tif")
plt = TiffReader()
# read image
@@ -483,8 +489,7 @@ class TestRegularisers(unittest.TestCase):
pars['penalty_type'],'gpu')
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
rms = rmse(Im, ndf_gpu)
pars['rmse'] = rms
pars['algorithm'] = NDF
@@ -557,8 +562,7 @@ class TestRegularisers(unittest.TestCase):
pars['time_marching_parameter'], 'gpu')
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
rms = rmse(Im, diff4th_gpu)
pars['rmse'] = rms
pars['algorithm'] = DIFF4th
@@ -603,8 +607,8 @@ class TestRegularisers(unittest.TestCase):
'input' : u0,\
'refdata' : u_ref,\
'regularisation_parameter':0.04, \
- 'number_of_iterations' :2000 ,\
- 'tolerance_constant':1e-06,\
+ 'number_of_iterations' :1000 ,\
+ 'tolerance_constant':1e-07,\
'eta_const':0.2,\
'methodTV': 0 ,\
'nonneg': 0 ,\
@@ -643,8 +647,7 @@ class TestRegularisers(unittest.TestCase):
pars['nonneg'],
pars['printingOut'],'gpu')
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
rms = rmse(Im, fgp_dtv_gpu)
pars['rmse'] = rms
pars['algorithm'] = FGP_dTV
@@ -765,8 +768,8 @@ class TestRegularisers(unittest.TestCase):
pars_rof_tv['number_of_iterations'],
pars_rof_tv['time_marching_parameter'],'gpu')
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
+
rms_rof = rmse(Im, rof_gpu)
# now compare obtained rms with the expected value
self.assertLess(abs(rms_rof-rms_rof_exp) , tolerance)
@@ -806,10 +809,10 @@ class TestRegularisers(unittest.TestCase):
pars_fgp_tv['nonneg'],
pars_fgp_tv['printingOut'],'gpu')
except ValueError as ve:
- self.assertTrue(True)
- return
+ self.skipTest("Results not comparable. GPU computing error.")
rms_fgp = rmse(Im, fgp_gpu)
# now compare obtained rms with the expected value
+
self.assertLess(abs(rms_fgp-rms_fgp_exp) , tolerance)
if __name__ == '__main__':
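
Throughout run_test.py above, the old pattern of passing trivially on a GPU failure (`self.assertTrue(True)` followed by `return`) is replaced with `self.skipTest(...)`, so a missing or failing CUDA device is reported as a skipped test instead of a false pass. The following is a minimal, self-contained sketch of that unittest pattern; the `gpu_call` stub is hypothetical and only simulates the wrapper raising `ValueError`:

```python
import unittest

def gpu_call():
    """Hypothetical stand-in for a GPU regulariser call such as ROF_TV(..., 'gpu')."""
    # Simulate the wrapper signalling a CUDA problem with ValueError.
    raise ValueError('CUDA error')

class TestGPUSkipPattern(unittest.TestCase):
    def test_cpu_vs_gpu(self):
        try:
            result = gpu_call()
        except ValueError:
            # Previously the test silently passed (assertTrue(True); return);
            # now it is reported as skipped instead.
            self.skipTest("Results not comparable. GPU computing error.")
        self.assertIsNotNone(result)

if __name__ == '__main__':
    unittest.main()
```

Run with `python -m unittest`, the test shows up as skipped (`s`) rather than as a spurious pass, which makes CPU-only Jenkins builds easier to interpret.
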
diff --git a/Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py b/Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py
index 616eab0..6529b5c 100644
--- a/Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py
+++ b/Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py
@@ -656,8 +656,8 @@ pars = {'algorithm' : FGP_dTV, \
'input' : u0,\
'refdata' : u_ref,\
'regularisation_parameter':0.04, \
- 'number_of_iterations' :2000 ,\
- 'tolerance_constant':1e-06,\
+ 'number_of_iterations' :1000 ,\
+ 'tolerance_constant':1e-07,\
'eta_const':0.2,\
'methodTV': 0 ,\
'nonneg': 0 ,\
diff --git a/Wrappers/Python/src/gpu_regularisers.pyx b/Wrappers/Python/src/gpu_regularisers.pyx
index 302727e..2b97865 100644
--- a/Wrappers/Python/src/gpu_regularisers.pyx
+++ b/Wrappers/Python/src/gpu_regularisers.pyx
@@ -18,15 +18,17 @@ import cython
import numpy as np
cimport numpy as np
-cdef extern void TV_ROF_GPU_main(float* Input, float* Output, float lambdaPar, int iter, float tau, int N, int M, int Z);
-cdef extern void TV_FGP_GPU_main(float *Input, float *Output, float lambdaPar, int iter, float epsil, int methodTV, int nonneg, int printM, int N, int M, int Z);
-cdef extern void TV_SB_GPU_main(float *Input, float *Output, float lambdaPar, int iter, float epsil, int methodTV, int printM, int N, int M, int Z);
-cdef extern void TGV_GPU_main(float *Input, float *Output, float lambdaPar, float alpha1, float alpha0, int iterationsNumb, float L2, int dimX, int dimY);
-cdef extern void LLT_ROF_GPU_main(float *Input, float *Output, float lambdaROF, float lambdaLLT, int iterationsNumb, float tau, int N, int M, int Z);
-cdef extern void NonlDiff_GPU_main(float *Input, float *Output, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, int penaltytype, int N, int M, int Z);
-cdef extern void dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output, float lambdaPar, int iterationsNumb, float epsil, float eta, int methodTV, int nonneg, int printM, int N, int M, int Z);
-cdef extern void Diffus4th_GPU_main(float *Input, float *Output, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, int N, int M, int Z);
-cdef extern void PatchSelect_GPU_main(float *Input, unsigned short *H_i, unsigned short *H_j, float *Weights, int N, int M, int SearchWindow, int SimilarWin, int NumNeighb, float h);
+CUDAErrorMessage = 'CUDA error'
+
+cdef extern int TV_ROF_GPU_main(float* Input, float* Output, float lambdaPar, int iter, float tau, int N, int M, int Z);
+cdef extern int TV_FGP_GPU_main(float *Input, float *Output, float lambdaPar, int iter, float epsil, int methodTV, int nonneg, int printM, int N, int M, int Z);
+cdef extern int TV_SB_GPU_main(float *Input, float *Output, float lambdaPar, int iter, float epsil, int methodTV, int printM, int N, int M, int Z);
+cdef extern int TGV_GPU_main(float *Input, float *Output, float lambdaPar, float alpha1, float alpha0, int iterationsNumb, float L2, int dimX, int dimY);
+cdef extern int LLT_ROF_GPU_main(float *Input, float *Output, float lambdaROF, float lambdaLLT, int iterationsNumb, float tau, int N, int M, int Z);
+cdef extern int NonlDiff_GPU_main(float *Input, float *Output, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, int penaltytype, int N, int M, int Z);
+cdef extern int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output, float lambdaPar, int iterationsNumb, float epsil, float eta, int methodTV, int nonneg, int printM, int N, int M, int Z);
+cdef extern int Diffus4th_GPU_main(float *Input, float *Output, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, int N, int M, int Z);
+cdef extern int PatchSelect_GPU_main(float *Input, unsigned short *H_i, unsigned short *H_j, float *Weights, int N, int M, int SearchWindow, int SimilarWin, int NumNeighb, float h);
# Total-variation Rudin-Osher-Fatemi (ROF)
def TV_ROF_GPU(inputData,
@@ -186,15 +188,16 @@ def ROFTV2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
cdef np.ndarray[np.float32_t, ndim=2, mode="c"] outputData = \
np.zeros([dims[0],dims[1]], dtype='float32')
- # Running CUDA code here
- TV_ROF_GPU_main(
+ # Running CUDA code here
+ if (TV_ROF_GPU_main(
&inputData[0,0], &outputData[0,0],
regularisation_parameter,
iterations ,
time_marching_parameter,
- dims[1], dims[0], 1);
-
- return outputData
+ dims[1], dims[0], 1)==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
def ROFTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
float regularisation_parameter,
@@ -210,14 +213,15 @@ def ROFTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
np.zeros([dims[0],dims[1],dims[2]], dtype='float32')
# Running CUDA code here
- TV_ROF_GPU_main(
+ if (TV_ROF_GPU_main(
&inputData[0,0,0], &outputData[0,0,0],
regularisation_parameter,
iterations ,
time_marching_parameter,
- dims[2], dims[1], dims[0]);
-
- return outputData
+ dims[2], dims[1], dims[0])==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
#****************************************************************#
#********************** Total-variation FGP *********************#
#****************************************************************#
@@ -238,16 +242,18 @@ def FGPTV2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
np.zeros([dims[0],dims[1]], dtype='float32')
# Running CUDA code here
- TV_FGP_GPU_main(&inputData[0,0], &outputData[0,0],
+ if (TV_FGP_GPU_main(&inputData[0,0], &outputData[0,0],
regularisation_parameter,
iterations,
tolerance_param,
methodTV,
nonneg,
printM,
- dims[1], dims[0], 1);
-
- return outputData
+ dims[1], dims[0], 1)==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
def FGPTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
float regularisation_parameter,
@@ -266,16 +272,18 @@ def FGPTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
np.zeros([dims[0],dims[1],dims[2]], dtype='float32')
# Running CUDA code here
- TV_FGP_GPU_main(&inputData[0,0,0], &outputData[0,0,0],
+ if (TV_FGP_GPU_main(&inputData[0,0,0], &outputData[0,0,0],
regularisation_parameter ,
iterations,
tolerance_param,
methodTV,
nonneg,
printM,
- dims[2], dims[1], dims[0]);
-
- return outputData
+ dims[2], dims[1], dims[0])==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
#***************************************************************#
#********************** Total-variation SB *********************#
#***************************************************************#
@@ -295,15 +303,17 @@ def SBTV2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
np.zeros([dims[0],dims[1]], dtype='float32')
# Running CUDA code here
- TV_SB_GPU_main(&inputData[0,0], &outputData[0,0],
+ if (TV_SB_GPU_main(&inputData[0,0], &outputData[0,0],
regularisation_parameter,
iterations,
tolerance_param,
methodTV,
printM,
- dims[1], dims[0], 1);
-
- return outputData
+ dims[1], dims[0], 1)==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
def SBTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
float regularisation_parameter,
@@ -321,15 +331,17 @@ def SBTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
np.zeros([dims[0],dims[1],dims[2]], dtype='float32')
# Running CUDA code here
- TV_SB_GPU_main(&inputData[0,0,0], &outputData[0,0,0],
+ if (TV_SB_GPU_main(&inputData[0,0,0], &outputData[0,0,0],
regularisation_parameter ,
iterations,
tolerance_param,
methodTV,
printM,
- dims[2], dims[1], dims[0]);
-
- return outputData
+ dims[2], dims[1], dims[0])==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
#***************************************************************#
#************************ LLT-ROF model ************************#
@@ -349,8 +361,11 @@ def LLT_ROF_GPU2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
np.zeros([dims[0],dims[1]], dtype='float32')
# Running CUDA code here
- LLT_ROF_GPU_main(&inputData[0,0], &outputData[0,0],regularisation_parameterROF, regularisation_parameterLLT, iterations, time_marching_parameter, dims[1],dims[0],1);
- return outputData
+ if (LLT_ROF_GPU_main(&inputData[0,0], &outputData[0,0],regularisation_parameterROF, regularisation_parameterLLT, iterations, time_marching_parameter, dims[1],dims[0],1)==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
def LLT_ROF_GPU3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
float regularisation_parameterROF,
@@ -367,8 +382,11 @@ def LLT_ROF_GPU3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
np.zeros([dims[0],dims[1],dims[2]], dtype='float32')
# Running CUDA code here
- LLT_ROF_GPU_main(&inputData[0,0,0], &outputData[0,0,0], regularisation_parameterROF, regularisation_parameterLLT, iterations, time_marching_parameter, dims[2], dims[1], dims[0]);
- return outputData
+ if (LLT_ROF_GPU_main(&inputData[0,0,0], &outputData[0,0,0], regularisation_parameterROF, regularisation_parameterLLT, iterations, time_marching_parameter, dims[2], dims[1], dims[0])==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
#***************************************************************#
@@ -389,13 +407,16 @@ def TGV2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
np.zeros([dims[0],dims[1]], dtype='float32')
#/* Run TGV iterations for 2D data */
- TGV_GPU_main(&inputData[0,0], &outputData[0,0], regularisation_parameter,
+ if (TGV_GPU_main(&inputData[0,0], &outputData[0,0], regularisation_parameter,
alpha1,
alpha0,
iterationsNumb,
LipshitzConst,
- dims[1],dims[0])
- return outputData
+ dims[1],dims[0])==0):
+     return outputData
+ else:
+     raise ValueError(CUDAErrorMessage);
+
#****************************************************************#
#**************Directional Total-variation FGP ******************#
@@ -419,7 +440,7 @@ def FGPdTV2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
np.zeros([dims[0],dims[1]], dtype='float32')
# Running CUDA code here
- dTV_FGP_GPU_main(&inputData[0,0], &refdata[0,0], &outputData[0,0],
+ if (dTV_FGP_GPU_main(&inputData[0,0], &refdata[0,0], &outputData[0,0],
regularisation_parameter,
iterations,
tolerance_param,
@@ -427,9 +448,11 @@ def FGPdTV2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
methodTV,
nonneg,
printM,
- dims[1], dims[0], 1);
-
- return outputData
+ dims[1], dims[0], 1)==0):
+     return outputData
+ else:
+     raise ValueError(CUDAErrorMessage);
+
def FGPdTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
np.ndarray[np.float32_t, ndim=3, mode="c"] refdata,
@@ -450,7 +473,7 @@ def FGPdTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
np.zeros([dims[0],dims[1],dims[2]], dtype='float32')
# Running CUDA code here
- dTV_FGP_GPU_main(&inputData[0,0,0], &refdata[0,0,0], &outputData[0,0,0],
+ if (dTV_FGP_GPU_main(&inputData[0,0,0], &refdata[0,0,0], &outputData[0,0,0],
regularisation_parameter ,
iterations,
tolerance_param,
@@ -458,8 +481,11 @@ def FGPdTV3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
methodTV,
nonneg,
printM,
- dims[2], dims[1], dims[0]);
- return outputData
+ dims[2], dims[1], dims[0])==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
#****************************************************************#
#***************Nonlinear (Isotropic) Diffusion******************#
@@ -483,8 +509,11 @@ def NDF_GPU_2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
# Run Nonlinear Diffusion iterations for 2D data
# Running CUDA code here
- NonlDiff_GPU_main(&inputData[0,0], &outputData[0,0], regularisation_parameter, edge_parameter, iterationsNumb, time_marching_parameter, penalty_type, dims[1], dims[0], 1)
- return outputData
+ if (NonlDiff_GPU_main(&inputData[0,0], &outputData[0,0], regularisation_parameter, edge_parameter, iterationsNumb, time_marching_parameter, penalty_type, dims[1], dims[0], 1)==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
def NDF_GPU_3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
float regularisation_parameter,
@@ -502,9 +531,11 @@ def NDF_GPU_3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
# Run Nonlinear Diffusion iterations for 3D data
# Running CUDA code here
- NonlDiff_GPU_main(&inputData[0,0,0], &outputData[0,0,0], regularisation_parameter, edge_parameter, iterationsNumb, time_marching_parameter, penalty_type, dims[2], dims[1], dims[0])
+ if (NonlDiff_GPU_main(&inputData[0,0,0], &outputData[0,0,0], regularisation_parameter, edge_parameter, iterationsNumb, time_marching_parameter, penalty_type, dims[2], dims[1], dims[0])==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
-
- return outputData
#****************************************************************#
#************Anisotropic Fourth-Order diffusion******************#
#****************************************************************#
@@ -522,8 +553,11 @@ def Diff4th_2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
# Run Anisotropic Fourth-Order diffusion for 2D data
# Running CUDA code here
- Diffus4th_GPU_main(&inputData[0,0], &outputData[0,0], regularisation_parameter, edge_parameter, iterationsNumb, time_marching_parameter, dims[1], dims[0], 1)
- return outputData
+ if (Diffus4th_GPU_main(&inputData[0,0], &outputData[0,0], regularisation_parameter, edge_parameter, iterationsNumb, time_marching_parameter, dims[1], dims[0], 1)==0):
+     return outputData
+ else:
+     raise ValueError(CUDAErrorMessage);
+
def Diff4th_3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
float regularisation_parameter,
@@ -540,9 +574,11 @@ def Diff4th_3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
# Run Anisotropic Fourth-Order diffusion for 3D data
# Running CUDA code here
- Diffus4th_GPU_main(&inputData[0,0,0], &outputData[0,0,0], regularisation_parameter, edge_parameter, iterationsNumb, time_marching_parameter, dims[2], dims[1], dims[0])
+ if (Diffus4th_GPU_main(&inputData[0,0,0], &outputData[0,0,0], regularisation_parameter, edge_parameter, iterationsNumb, time_marching_parameter, dims[2], dims[1], dims[0])==0):
+     return outputData;
+ else:
+     raise ValueError(CUDAErrorMessage);
-
- return outputData
#****************************************************************#
#************Patch-based weights pre-selection******************#
#****************************************************************#
@@ -571,6 +607,8 @@ def PatchSel_2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData,
np.zeros([dims[0], dims[1],dims[2]], dtype='uint16')
# Run patch-based weight selection function
- PatchSelect_GPU_main(&inputData[0,0], &H_j[0,0,0], &H_i[0,0,0], &Weights[0,0,0], dims[2], dims[1], searchwindow, patchwindow, neighbours, edge_parameter)
-
- return H_i, H_j, Weights
+ if (PatchSelect_GPU_main(&inputData[0,0], &H_j[0,0,0], &H_i[0,0,0], &Weights[0,0,0], dims[2], dims[1], searchwindow, patchwindow, neighbours, edge_parameter)==0):
+     return H_i, H_j, Weights;
+ else:
+     raise ValueError(CUDAErrorMessage);
+
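
The gpu_regularisers.pyx changes above convert every GPU entry point from a `void` return to an `int` status (0 on success) and raise `ValueError(CUDAErrorMessage)` when the status is nonzero, which is exactly what the updated tests catch and turn into skips. Below is a minimal pure-Python sketch of that status-code convention, assuming nothing beyond NumPy; `fake_gpu_kernel` and `run_gpu_filter` are illustrative names, not part of the toolkit:

```python
import numpy as np

CUDAErrorMessage = 'CUDA error'

def fake_gpu_kernel(data, out):
    """Stand-in for a CUDA entry point such as TV_ROF_GPU_main.

    Mirrors the new extern signatures: returns 0 on success and a
    nonzero status code on failure instead of returning void.
    """
    try:
        np.copyto(out, data)  # pretend the regulariser ran on the GPU
        return 0
    except Exception:
        return 1

def run_gpu_filter(data):
    """Wrapper following the status-check pattern used in the .pyx diff above."""
    out = np.zeros_like(data, dtype='float32')
    if fake_gpu_kernel(data, out) == 0:
        return out
    else:
        raise ValueError(CUDAErrorMessage)

if __name__ == '__main__':
    print(run_gpu_filter(np.ones((4, 4), dtype='float32')).sum())  # 16.0
```

Returning a status from the C/CUDA layer and translating it into an exception at the wrapper boundary keeps the CUDA code free of Python API calls while still surfacing device errors to callers such as the conda-recipe tests.
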