 Wrappers/Python/demo/test_cpu_regularizers.py        |  21
 Wrappers/Python/src/cpu_regularizers.cpp             | 289
 Wrappers/Python/test/test_cpu_vs_gpu_regularizers.py |   4
 Wrappers/Python/test/test_gpu_regularizers.py        |  59
 4 files changed, 63 insertions(+), 310 deletions(-)
diff --git a/Wrappers/Python/demo/test_cpu_regularizers.py b/Wrappers/Python/demo/test_cpu_regularizers.py
index 1d97857..76b9de7 100644
--- a/Wrappers/Python/demo/test_cpu_regularizers.py
+++ b/Wrappers/Python/demo/test_cpu_regularizers.py
@@ -11,10 +11,8 @@ import numpy as np
import os
from enum import Enum
import timeit
-from ccpi.filters.cpu_regularizers_boost import SplitBregman_TV, LLT_model, PatchBased_Regul ,\
- TGV_PD
-from ccpi.filters.cpu_regularizers_cython import TV_ROF_CPU, TV_FGP_CPU
-
+from ccpi.filters.cpu_regularizers_boost import SplitBregman_TV, LLT_model, PatchBased_Regul, TGV_PD
+from ccpi.filters.regularizers import ROF_TV, FGP_TV
###############################################################################
#https://stackoverflow.com/questions/13875989/comparing-image-in-url-to-image-in-filesystem-in-python/13884956#13884956
#NRMSE a normalization of the root of the mean squared error
@@ -127,7 +125,7 @@ imgplot = plt.imshow(splitbregman,\
###################### FGP_TV #########################################
start_time = timeit.default_timer()
-pars = {'algorithm' : TV_FGP_CPU , \
+pars = {'algorithm' : FGP_TV , \
'input' : u0,\
'regularization_parameter':0.07, \
'number_of_iterations' :300 ,\
@@ -137,13 +135,13 @@ pars = {'algorithm' : TV_FGP_CPU , \
'printingOut': 0
}
-fgp = TV_FGP_CPU(pars['input'],
+fgp = FGP_TV(pars['input'],
pars['regularization_parameter'],
pars['number_of_iterations'],
pars['tolerance_constant'],
pars['methodTV'],
pars['nonneg'],
- pars['printingOut'])
+ pars['printingOut'], 'cpu')
rms = rmse(Im, fgp)
pars['rmse'] = rms
@@ -280,18 +278,17 @@ imgplot = plt.imshow(tgv, cmap="gray")
start_time = timeit.default_timer()
-pars = {'algorithm': TV_ROF_CPU , \
+pars = {'algorithm': ROF_TV , \
'input' : u0,\
'regularization_parameter':0.07,\
'marching_step': 0.0025,\
'number_of_iterations': 300
}
-rof = TV_ROF_CPU(pars['input'],
+rof = ROF_TV(pars['input'],
pars['regularization_parameter'],
pars['number_of_iterations'],
- pars['marching_step']
- )
-#tgv = out
+ pars['marching_step'], 'cpu')
+
rms = rmse(Im, rof)
pars['rmse'] = rms
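
The demo hunks above migrate FGP_TV and ROF_TV to the unified ccpi.filters.regularizers API, which selects the backend through a final device string. A minimal sketch of the new call pattern, with the argument order inferred from this diff and a synthetic image standing in for u0:

import numpy as np
from ccpi.filters.regularizers import ROF_TV, FGP_TV

u0 = np.random.rand(256, 256).astype(np.float32)  # synthetic noisy image

# ROF-TV: input, regularization parameter, iterations, time-marching step, device
rof = ROF_TV(u0, 0.07, 300, 0.0025, 'cpu')

# FGP-TV: input, regularization parameter, iterations, tolerance,
#         methodTV (0 = isotropic), non-negativity flag, printing flag, device
fgp = FGP_TV(u0, 0.07, 300, 0.00001, 0, 0, 0, 'cpu')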
diff --git a/Wrappers/Python/src/cpu_regularizers.cpp b/Wrappers/Python/src/cpu_regularizers.cpp
index b8156d5..88b7c3c 100644
--- a/Wrappers/Python/src/cpu_regularizers.cpp
+++ b/Wrappers/Python/src/cpu_regularizers.cpp
@@ -303,292 +303,6 @@ bp::list SplitBregman_TV(np::ndarray input, double d_mu, int iter, double d_epsi
}
-
-
-//bp::list FGP_TV(np::ndarray input, double d_mu, int iter, double d_epsil, int methTV) {
-
- //// the result is in the following list
- //bp::list result;
-
- //int number_of_dims, dimX, dimY, dimZ, ll, j, count;
- //float *A, *D = NULL, *D_old = NULL, *P1 = NULL, *P2 = NULL, *P3 = NULL, *P1_old = NULL, *P2_old = NULL, *P3_old = NULL, *R1 = NULL, *R2 = NULL, *R3 = NULL;
- //float lambda, tk, tkp1, re, re1, re_old, epsil, funcval;
-
- ////number_of_dims = mxGetNumberOfDimensions(prhs[0]);
- ////dim_array = mxGetDimensions(prhs[0]);
-
- //number_of_dims = input.get_nd();
- //int dim_array[3];
-
- //dim_array[0] = input.shape(0);
- //dim_array[1] = input.shape(1);
- //if (number_of_dims == 2) {
- //dim_array[2] = -1;
- //}
- //else {
- //dim_array[2] = input.shape(2);
- //}
- //// Parameter handling is be done in Python
- /////*Handling Matlab input data*/
- ////if ((nrhs < 2) || (nrhs > 5)) mexErrMsgTxt("At least 2 parameters is required: Image(2D/3D), Regularization parameter. The full list of parameters: Image(2D/3D), Regularization parameter, iterations number, tolerance, penalty type ('iso' or 'l1')");
-
- /////*Handling Matlab input data*/
- ////A = (float *)mxGetData(prhs[0]); /*noisy image (2D/3D) */
- //A = reinterpret_cast<float *>(input.get_data());
-
- ////mu = (float)mxGetScalar(prhs[1]); /* regularization parameter */
- //lambda = (float)d_mu;
-
- ////iter = 35; /* default iterations number */
-
- ////epsil = 0.0001; /* default tolerance constant */
- //epsil = (float)d_epsil;
- ////methTV = 0; /* default isotropic TV penalty */
- ////if ((nrhs == 3) || (nrhs == 4) || (nrhs == 5)) iter = (int)mxGetScalar(prhs[2]); /* iterations number */
- ////if ((nrhs == 4) || (nrhs == 5)) epsil = (float)mxGetScalar(prhs[3]); /* tolerance constant */
- ////if (nrhs == 5) {
- //// char *penalty_type;
- //// penalty_type = mxArrayToString(prhs[4]); /* choosing TV penalty: 'iso' or 'l1', 'iso' is the default */
- //// if ((strcmp(penalty_type, "l1") != 0) && (strcmp(penalty_type, "iso") != 0)) mexErrMsgTxt("Choose TV type: 'iso' or 'l1',");
- //// if (strcmp(penalty_type, "l1") == 0) methTV = 1; /* enable 'l1' penalty */
- //// mxFree(penalty_type);
- ////}
- ////if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgTxt("The input image must be in a single precision"); }
-
- ////plhs[1] = mxCreateNumericMatrix(1, 1, mxSINGLE_CLASS, mxREAL);
- //bp::tuple shape1 = bp::make_tuple(dim_array[0], dim_array[1]);
- //np::dtype dtype = np::dtype::get_builtin<float>();
- //np::ndarray out1 = np::zeros(shape1, dtype);
-
- ////float *funcvalA = (float *)mxGetData(plhs[1]);
- //float * funcvalA = reinterpret_cast<float *>(out1.get_data());
- ////if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) { mexErrMsgTxt("The input image must be in a single precision"); }
-
- ///*Handling Matlab output data*/
- //dimX = dim_array[0]; dimY = dim_array[1]; dimZ = dim_array[2];
-
- //tk = 1.0f;
- //tkp1 = 1.0f;
- //count = 1;
- //re_old = 0.0f;
-
- //if (number_of_dims == 2) {
- //dimZ = 1; /*2D case*/
- ///*D = (float*)mxGetPr(plhs[0] = mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));
- //D_old = (float*)mxGetPr(mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));
- //P1 = (float*)mxGetPr(mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));
- //P2 = (float*)mxGetPr(mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));
- //P1_old = (float*)mxGetPr(mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));
- //P2_old = (float*)mxGetPr(mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));
- //R1 = (float*)mxGetPr(mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));
- //R2 = (float*)mxGetPr(mxCreateNumericArray(2, dim_array, mxSINGLE_CLASS, mxREAL));*/
-
- //bp::tuple shape = bp::make_tuple(dim_array[0], dim_array[1]);
- //np::dtype dtype = np::dtype::get_builtin<float>();
-
-
- //np::ndarray npD = np::zeros(shape, dtype);
- //np::ndarray npD_old = np::zeros(shape, dtype);
- //np::ndarray npP1 = np::zeros(shape, dtype);
- //np::ndarray npP2 = np::zeros(shape, dtype);
- //np::ndarray npP1_old = np::zeros(shape, dtype);
- //np::ndarray npP2_old = np::zeros(shape, dtype);
- //np::ndarray npR1 = np::zeros(shape, dtype);
- //np::ndarray npR2 = np::zeros(shape, dtype);
-
- //D = reinterpret_cast<float *>(npD.get_data());
- //D_old = reinterpret_cast<float *>(npD_old.get_data());
- //P1 = reinterpret_cast<float *>(npP1.get_data());
- //P2 = reinterpret_cast<float *>(npP2.get_data());
- //P1_old = reinterpret_cast<float *>(npP1_old.get_data());
- //P2_old = reinterpret_cast<float *>(npP2_old.get_data());
- //R1 = reinterpret_cast<float *>(npR1.get_data());
- //R2 = reinterpret_cast<float *>(npR2.get_data());
-
- ///* begin iterations */
- //for (ll = 0; ll<iter; ll++) {
- ///* computing the gradient of the objective function */
- //Obj_func2D(A, D, R1, R2, lambda, dimX, dimY);
-
- ///*Taking a step towards minus of the gradient*/
- //Grad_func2D(P1, P2, D, R1, R2, lambda, dimX, dimY);
-
- ///* projection step */
- //Proj_func2D(P1, P2, methTV, dimX, dimY);
-
- ///*updating R and t*/
- //tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f;
- //Rupd_func2D(P1, P1_old, P2, P2_old, R1, R2, tkp1, tk, dimX, dimY);
-
- ///* calculate norm */
- //re = 0.0f; re1 = 0.0f;
- //for (j = 0; j<dimX*dimY*dimZ; j++)
- //{
- //re += pow(D[j] - D_old[j], 2);
- //re1 += pow(D[j], 2);
- //}
- //re = sqrt(re) / sqrt(re1);
- //if (re < epsil) count++;
- //if (count > 4) {
- //Obj_func2D(A, D, P1, P2, lambda, dimX, dimY);
- //funcval = 0.0f;
- //for (j = 0; j<dimX*dimY*dimZ; j++) funcval += pow(D[j], 2);
- ////funcvalA[0] = sqrt(funcval);
- //float fv = sqrt(funcval);
- //std::memcpy(funcvalA, &fv, sizeof(float));
- //break;
- //}
-
- ///* check that the residual norm is decreasing */
- //if (ll > 2) {
- //if (re > re_old) {
- //Obj_func2D(A, D, P1, P2, lambda, dimX, dimY);
- //funcval = 0.0f;
- //for (j = 0; j<dimX*dimY*dimZ; j++) funcval += pow(D[j], 2);
- ////funcvalA[0] = sqrt(funcval);
- //float fv = sqrt(funcval);
- //std::memcpy(funcvalA, &fv, sizeof(float));
- //break;
- //}
- //}
- //re_old = re;
- ///*printf("%f %i %i \n", re, ll, count); */
-
- ///*storing old values*/
- //copyIm(D, D_old, dimX, dimY, dimZ);
- //copyIm(P1, P1_old, dimX, dimY, dimZ);
- //copyIm(P2, P2_old, dimX, dimY, dimZ);
- //tk = tkp1;
-
- ///* calculating the objective function value */
- //if (ll == (iter - 1)) {
- //Obj_func2D(A, D, P1, P2, lambda, dimX, dimY);
- //funcval = 0.0f;
- //for (j = 0; j<dimX*dimY*dimZ; j++) funcval += pow(D[j], 2);
- ////funcvalA[0] = sqrt(funcval);
- //float fv = sqrt(funcval);
- //std::memcpy(funcvalA, &fv, sizeof(float));
- //}
- //}
- ////printf("FGP-TV iterations stopped at iteration %i with the function value %f \n", ll, funcvalA[0]);
- //result.append<np::ndarray>(npD);
- //result.append<np::ndarray>(out1);
- //result.append<int>(ll);
- //}
- //if (number_of_dims == 3) {
- ///*D = (float*)mxGetPr(plhs[0] = mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //D_old = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //P1 = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //P2 = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //P3 = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //P1_old = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //P2_old = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //P3_old = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //R1 = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //R2 = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
- //R3 = (float*)mxGetPr(mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));*/
- //bp::tuple shape = bp::make_tuple(dim_array[0], dim_array[1], dim_array[2]);
- //np::dtype dtype = np::dtype::get_builtin<float>();
-
- //np::ndarray npD = np::zeros(shape, dtype);
- //np::ndarray npD_old = np::zeros(shape, dtype);
- //np::ndarray npP1 = np::zeros(shape, dtype);
- //np::ndarray npP2 = np::zeros(shape, dtype);
- //np::ndarray npP3 = np::zeros(shape, dtype);
- //np::ndarray npP1_old = np::zeros(shape, dtype);
- //np::ndarray npP2_old = np::zeros(shape, dtype);
- //np::ndarray npP3_old = np::zeros(shape, dtype);
- //np::ndarray npR1 = np::zeros(shape, dtype);
- //np::ndarray npR2 = np::zeros(shape, dtype);
- //np::ndarray npR3 = np::zeros(shape, dtype);
-
- //D = reinterpret_cast<float *>(npD.get_data());
- //D_old = reinterpret_cast<float *>(npD_old.get_data());
- //P1 = reinterpret_cast<float *>(npP1.get_data());
- //P2 = reinterpret_cast<float *>(npP2.get_data());
- //P3 = reinterpret_cast<float *>(npP3.get_data());
- //P1_old = reinterpret_cast<float *>(npP1_old.get_data());
- //P2_old = reinterpret_cast<float *>(npP2_old.get_data());
- //P3_old = reinterpret_cast<float *>(npP3_old.get_data());
- //R1 = reinterpret_cast<float *>(npR1.get_data());
- //R2 = reinterpret_cast<float *>(npR2.get_data());
- //R3 = reinterpret_cast<float *>(npR3.get_data());
- ///* begin iterations */
- //for (ll = 0; ll<iter; ll++) {
- ///* computing the gradient of the objective function */
- //Obj_func3D(A, D, R1, R2, R3, lambda, dimX, dimY, dimZ);
- ///*Taking a step towards minus of the gradient*/
- //Grad_func3D(P1, P2, P3, D, R1, R2, R3, lambda, dimX, dimY, dimZ);
-
- ///* projection step */
- //Proj_func3D(P1, P2, P3, dimX, dimY, dimZ);
-
- ///*updating R and t*/
- //tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f;
- //Rupd_func3D(P1, P1_old, P2, P2_old, P3, P3_old, R1, R2, R3, tkp1, tk, dimX, dimY, dimZ);
-
- ///* calculate norm - stopping rules*/
- //re = 0.0f; re1 = 0.0f;
- //for (j = 0; j<dimX*dimY*dimZ; j++)
- //{
- //re += pow(D[j] - D_old[j], 2);
- //re1 += pow(D[j], 2);
- //}
- //re = sqrt(re) / sqrt(re1);
- ///* stop if the norm residual is less than the tolerance EPS */
- //if (re < epsil) count++;
- //if (count > 3) {
- //Obj_func3D(A, D, P1, P2, P3, lambda, dimX, dimY, dimZ);
- //funcval = 0.0f;
- //for (j = 0; j<dimX*dimY*dimZ; j++) funcval += pow(D[j], 2);
- ////funcvalA[0] = sqrt(funcval);
- //float fv = sqrt(funcval);
- //std::memcpy(funcvalA, &fv, sizeof(float));
- //break;
- //}
-
- ///* check that the residual norm is decreasing */
- //if (ll > 2) {
- //if (re > re_old) {
- //Obj_func3D(A, D, P1, P2, P3, lambda, dimX, dimY, dimZ);
- //funcval = 0.0f;
- //for (j = 0; j<dimX*dimY*dimZ; j++) funcval += pow(D[j], 2);
- ////funcvalA[0] = sqrt(funcval);
- //float fv = sqrt(funcval);
- //std::memcpy(funcvalA, &fv, sizeof(float));
- //break;
- //}
- //}
-
- //re_old = re;
- ///*printf("%f %i %i \n", re, ll, count); */
-
- ///*storing old values*/
- //copyIm(D, D_old, dimX, dimY, dimZ);
- //copyIm(P1, P1_old, dimX, dimY, dimZ);
- //copyIm(P2, P2_old, dimX, dimY, dimZ);
- //copyIm(P3, P3_old, dimX, dimY, dimZ);
- //tk = tkp1;
-
- //if (ll == (iter - 1)) {
- //Obj_func3D(A, D, P1, P2, P3, lambda, dimX, dimY, dimZ);
- //funcval = 0.0f;
- //for (j = 0; j<dimX*dimY*dimZ; j++) funcval += pow(D[j], 2);
- ////funcvalA[0] = sqrt(funcval);
- //float fv = sqrt(funcval);
- //std::memcpy(funcvalA, &fv, sizeof(float));
- //}
-
- //}
- ////printf("FGP-TV iterations stopped at iteration %i with the function value %f \n", ll, funcvalA[0]);
- //result.append<np::ndarray>(npD);
- //result.append<np::ndarray>(out1);
- //result.append<int>(ll);
- //}
-
- //return result;
-//}
-
bp::list LLT_model(np::ndarray input, double d_lambda, double d_tau, int iter, double d_epsil, int switcher) {
// the result is in the following list
bp::list result;
@@ -1022,9 +736,6 @@ bp::list TGV_PD(np::ndarray input, double d_lambda, double d_alpha1, double d_al
result.append<np::ndarray>(npU);
}
-
-
-
return result;
}
diff --git a/Wrappers/Python/test/test_cpu_vs_gpu_regularizers.py b/Wrappers/Python/test/test_cpu_vs_gpu_regularizers.py
index a9d0f31..63be1a0 100644
--- a/Wrappers/Python/test/test_cpu_vs_gpu_regularizers.py
+++ b/Wrappers/Python/test/test_cpu_vs_gpu_regularizers.py
@@ -3,7 +3,7 @@
"""
Created on Thu Feb 22 11:39:43 2018
-Testing CPU implementation against GPU one
+Testing CPU implementation against the GPU one
@author: Daniil Kazantsev
"""
@@ -12,8 +12,6 @@ import matplotlib.pyplot as plt
import numpy as np
import os
import timeit
-#from ccpi.filters.cpu_regularizers_cython import TV_ROF_CPU, TV_FGP_CPU
-#from ccpi.filters.gpu_regularizers import TV_ROF_GPU, TV_FGP_GPU
from ccpi.filters.regularizers import ROF_TV, FGP_TV
###############################################################################
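
With the commented-out cython/GPU imports dropped, the comparison test relies only on the unified functions. A minimal sketch of the CPU-vs-GPU check this implies; the max-difference metric here is an assumption for illustration, not the test's actual assertion:

import numpy as np
from ccpi.filters.regularizers import ROF_TV

u0 = np.random.rand(256, 256).astype(np.float32)

# Same filter, same parameters, different backends.
rof_cpu = ROF_TV(u0, 0.04, 300, 0.0025, 'cpu')
rof_gpu = ROF_TV(u0, 0.04, 300, 0.0025, 'gpu')

# The two implementations should agree to within a small numerical tolerance.
print("max |CPU - GPU| =", np.max(np.abs(rof_cpu - rof_gpu)))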
diff --git a/Wrappers/Python/test/test_gpu_regularizers.py b/Wrappers/Python/test/test_gpu_regularizers.py
index 04aeeb4..640b3f9 100644
--- a/Wrappers/Python/test/test_gpu_regularizers.py
+++ b/Wrappers/Python/test/test_gpu_regularizers.py
@@ -11,8 +11,8 @@ import numpy as np
import os
from enum import Enum
import timeit
-from ccpi.filters.gpu_regularizers import Diff4thHajiaboli, NML, TV_ROF_GPU
-
+from ccpi.filters.gpu_regularizers import Diff4thHajiaboli, NML
+from ccpi.filters.regularizers import ROF_TV, FGP_TV
###############################################################################
def printParametersToString(pars):
txt = r''
@@ -151,20 +151,19 @@ plt.colorbar(ticks=[0, 0.03], orientation='vertical')
## Rudin-Osher-Fatemi (ROF) TV regularization
start_time = timeit.default_timer()
-
pars = {
-'algorithm' : TV_ROF_GPU , \
+'algorithm' : ROF_TV , \
'input' : u0,
'regularization_parameter': 0.04,\
'number_of_iterations':300,\
'time_marching_parameter': 0.0025
}
-
+
-rof_tv = TV_ROF_GPU(pars['input'],
+rof_tv = ROF_TV(pars['input'],
pars['regularization_parameter'],
pars['number_of_iterations'],
- pars['time_marching_parameter'])
+ pars['time_marching_parameter'],'gpu')
rms = rmse(Im, rof_tv)
pars['rmse'] = rms
@@ -190,3 +189,51 @@ a.text(0.05, 0.95, 'rof_tv - u0', transform=a.transAxes, fontsize=14,
imgplot = plt.imshow((rof_tv - u0)**2, vmin=0, vmax=0.03, cmap="gray")
plt.colorbar(ticks=[0, 0.03], orientation='vertical')
plt.show()
+
+## Fast-Gradient Projection TV regularization
+"""
+start_time = timeit.default_timer()
+
+pars = {'algorithm' : FGP_TV, \
+ 'input' : u0,\
+ 'regularization_parameter':0.04, \
+ 'number_of_iterations' :1200 ,\
+ 'tolerance_constant':0.00001,\
+ 'methodTV': 0 ,\
+ 'nonneg': 0 ,\
+ 'printingOut': 0
+ }
+
+fgp_gpu = FGP_TV(pars['input'],
+ pars['regularization_parameter'],
+ pars['number_of_iterations'],
+ pars['tolerance_constant'],
+ pars['methodTV'],
+ pars['nonneg'],
+ pars['printingOut'],'gpu')
+
+rms = rmse(Im, fgp_gpu)
+pars['rmse'] = rms
+txtstr = printParametersToString(pars)
+txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
+print (txtstr)
+a=fig.add_subplot(2,4,4)
+
+# these are matplotlib.patch.Patch properties
+props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
+# place a text box in upper left in axes coords
+a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=12,
+ verticalalignment='top', bbox=props)
+imgplot = plt.imshow(fgp_gpu, cmap="gray")
+
+a=fig.add_subplot(2,4,8)
+
+# these are matplotlib.patch.Patch properties
+props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
+# place a text box in upper left in axes coords
+a.text(0.05, 0.95, 'fgp_gpu - u0', transform=a.transAxes, fontsize=14,
+ verticalalignment='top', bbox=props)
+imgplot = plt.imshow((fgp_gpu - u0)**2, vmin=0, vmax=0.03, cmap="gray")
+plt.colorbar(ticks=[0, 0.03], orientation='vertical')
+plt.show()
+"""