author | epapoutsellis <epapoutsellis@gmail.com> | 2019-04-24 10:49:41 +0100
---|---|---
committer | epapoutsellis <epapoutsellis@gmail.com> | 2019-04-24 10:49:41 +0100
commit | b0a2d779602df9f8abfcb68083f9de0df43ed5b0 (patch) |
tree | afdc34e94afe3ff41d9130feebc337aa759d85b3 |
parent | f12e27ba088462858f35315193cf6bf624325d1e (diff) |
fix TGV example
4 files changed, 130 insertions, 96 deletions
```diff
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index 7631e29..110f998 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -104,6 +104,7 @@ class PDHG(Algorithm):
         self.y_old = self.y
 
     def update_objective(self):
+        p1 = self.f(self.operator.direct(self.x)) + self.g(self.x)
         d1 = -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(-1*self.operator.adjoint(self.y)))
@@ -166,7 +167,7 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
         x_old.fill(x)
         y_old.fill(y)
 
-        if i%10==0:
+        if i%50==0:
 
             p1 = f(operator.direct(x)) + g(x)
             d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) )
@@ -194,8 +195,8 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
             x_old.fill(x)
             y_old.fill(y)
-
-            if i%10==0:
+
+            if i%50==0:
 
                 p1 = f(operator.direct(x)) + g(x)
                 d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) )
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
index 3c15fa1..243809b 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
@@ -69,9 +69,8 @@ class SymmetrizedGradient(Gradient):
                     self.FD.direction = i
                     self.FD.adjoint(x.get_item(j), out=out[ind])
                     ind+=1
-            out1 = [out[i] for i in self.order_ind]
-            res = [0.5 * sum(x) for x in zip(out, out1)]
-            out.fill(BlockDataContainer(*res))
+            out1 = BlockDataContainer(*[out[i] for i in self.order_ind])
+            out.fill( 0.5 * (out + out1) )
 
     def adjoint(self, x, out=None):
@@ -102,10 +101,11 @@ class SymmetrizedGradient(Gradient):
                         self.FD.direct(x[i], out=tmp[j])
                         i+=1
                         tmp1+=tmp[j]
-                out[k].fill(tmp1)
+                out[k].fill(tmp1)
+#            tmp = self.adjoint(x)
+#            out.fill(tmp)
-
-
+
     def domain_geometry(self):
         return self.gm_domain
@@ -114,12 +114,12 @@ class SymmetrizedGradient(Gradient):
 
     def norm(self):
 
-        #TODO need dot method for BlockDataContainer
-        return numpy.sqrt(4*self.gm_domain.shape[0])
+#        TODO need dot method for BlockDataContainer
+#        return numpy.sqrt(4*self.gm_domain.shape[0])
 
-#        x0 = self.gm_domain.allocate('random_int')
-#        self.s1, sall, svec = LinearOperator.PowerMethod(self, 10, x0)
-#        return self.s1
+#        x0 = self.gm_domain.allocate('random')
+        self.s1, sall, svec = LinearOperator.PowerMethod(self, 50)
+        return self.s1
@@ -226,6 +226,13 @@ if __name__ == '__main__':
 
     print(LHS_out, RHS_out)
 
+    out = E1.range_geometry().allocate()
+    E1.direct(u1, out=out)
+    E1.adjoint(out, out=out1)
+
+    print(E1.norm())
+    print(E2.norm())
+
diff --git a/Wrappers/Python/wip/pdhg_TGV_denoising.py b/Wrappers/Python/wip/pdhg_TGV_denoising.py
index 0b6e594..1c570cb 100644
--- a/Wrappers/Python/wip/pdhg_TGV_denoising.py
+++ b/Wrappers/Python/wip/pdhg_TGV_denoising.py
@@ -105,100 +105,126 @@ normK = operator.norm()
 sigma = 1
 tau = 1/(sigma*normK**2)
 ##
-opt = {'niter':2000}
-opt1 = {'niter':2000, 'memopt': True}
+opt = {'niter':500}
+opt1 = {'niter':500, 'memopt': True}
 #
 t1 = timer()
 res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
 t2 = timer()
 #
+t3 = timer()
+res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
+t4 = timer()
+#
+plt.figure(figsize=(15,15))
+plt.subplot(3,1,1)
 plt.imshow(res[0].as_array())
+plt.title('no memopt')
+plt.colorbar()
+plt.subplot(3,1,2)
+plt.imshow(res1[0].as_array())
+plt.title('memopt')
+plt.colorbar()
+plt.subplot(3,1,3)
+plt.imshow((res1[0] - res[0]).abs().as_array())
+plt.title('diff')
+plt.colorbar()
 plt.show()
+print("NoMemopt/Memopt is {}/{}".format(t2-t1, t4-t3))
+
+
+######
+
+#%%
 
-#t3 = timer()
-#res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
-#t4 = timer()
+#def update_plot(it_update, objective, x):
+#
+##    sol = pdhg.get_output()
+#    plt.figure()
+#    plt.imshow(x[0].as_array())
+#    plt.show()
+#
+#
+##def stop_criterion(x,)
 #
-#plt.figure(figsize=(15,15))
-#plt.subplot(3,1,1)
-#plt.imshow(res[0].as_array())
-#plt.title('no memopt')
-#plt.colorbar()
-#plt.subplot(3,1,2)
-#plt.imshow(res1[0].as_array())
-#plt.title('memopt')
-#plt.colorbar()
-#plt.subplot(3,1,3)
-#plt.imshow((res1[0] - res[0]).abs().as_array())
-#plt.title('diff')
-#plt.colorbar()
-#plt.show()
+#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
+#pdhg.max_iteration = 2000
+#pdhg.update_objective_interval = 100
 #
-#print("NoMemopt/Memopt is {}/{}".format(t2-t1, t4-t3))
-
+#pdhg.run(4000, verbose=False, callback=update_plot)
 
-#%% Check with CVX solution
-from ccpi.optimisation.operators import SparseFiniteDiff
+#%%
 
-try:
-    from cvxpy import *
-    cvx_not_installable = True
-except ImportError:
-    cvx_not_installable = False
 
-if cvx_not_installable:
-
-    u = Variable(ig.shape)
-    w1 = Variable((N, N))
-    w2 = Variable((N, N))
-
-    # create TGV regulariser
-    DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
-    DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
-
-    regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u) - vec(w1), \
-                                           DY.matrix() * vec(u) - vec(w2)]), 2, axis = 0)) + \
-                  beta * sum(norm(vstack([ DX.matrix().transpose() * vec(w1), DY.matrix().transpose() * vec(w2), \
-                                           0.5 * ( DX.matrix().transpose() * vec(w2) + DY.matrix().transpose() * vec(w1) ), \
-                                           0.5 * ( DX.matrix().transpose() * vec(w2) + DY.matrix().transpose() * vec(w1) ) ]), 2, axis = 0 ) )
-
-    constraints = []
-    fidelity = 0.5 * sum_squares(u - noisy_data.as_array())
-    solver = MOSEK
-
-    obj = Minimize( regulariser + fidelity)
-    prob = Problem(obj)
-    result = prob.solve(verbose = True, solver = solver)
-
-    diff_cvx = numpy.abs( res[0].as_array() - u.value )
-
-    # Show result
-    plt.figure(figsize=(15,15))
-    plt.subplot(3,1,1)
-    plt.imshow(res[0].as_array())
-    plt.title('PDHG solution')
-    plt.colorbar()
-
-    plt.subplot(3,1,2)
-    plt.imshow(u.value)
-    plt.title('CVX solution')
-    plt.colorbar()
-
-    plt.subplot(3,1,3)
-    plt.imshow(diff_cvx)
-    plt.title('Difference')
-    plt.colorbar()
-    plt.show()
-
-    plt.plot(np.linspace(0,N,N), res[0].as_array()[int(N/2),:], label = 'PDHG')
-    plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX')
-    plt.legend()
-
-    print('Primal Objective (CVX) {} '.format(obj.value))
-    print('Primal Objective (PDHG) {} '.format(primal[-1]))
-    print('Min/Max of absolute difference {}/{}'.format(diff_cvx.min(), diff_cvx.max()))
-
+
+
+
+
+
+#%% Check with CVX solution
+
+#from ccpi.optimisation.operators import SparseFiniteDiff
+#
+#try:
+#    from cvxpy import *
+#    cvx_not_installable = True
+#except ImportError:
+#    cvx_not_installable = False
+#
+#if cvx_not_installable:
+#
+#    u = Variable(ig.shape)
+#    w1 = Variable((N, N))
+#    w2 = Variable((N, N))
+#
+#    # create TGV regulariser
+#    DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
+#    DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
+#
+#    regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u) - vec(w1), \
+#                                           DY.matrix() * vec(u) - vec(w2)]), 2, axis = 0)) + \
+#                  beta * sum(norm(vstack([ DX.matrix().transpose() * vec(w1), DY.matrix().transpose() * vec(w2), \
+#                                           0.5 * ( DX.matrix().transpose() * vec(w2) + DY.matrix().transpose() * vec(w1) ), \
+#                                           0.5 * ( DX.matrix().transpose() * vec(w2) + DY.matrix().transpose() * vec(w1) ) ]), 2, axis = 0 ) )
+#
+#    constraints = []
+#    fidelity = 0.5 * sum_squares(u - noisy_data.as_array())
+#    solver = MOSEK
+#
+#    obj = Minimize( regulariser + fidelity)
+#    prob = Problem(obj)
+#    result = prob.solve(verbose = True, solver = solver)
+#
+#    diff_cvx = numpy.abs( res[0].as_array() - u.value )
+#
+#    # Show result
+#    plt.figure(figsize=(15,15))
+#    plt.subplot(3,1,1)
+#    plt.imshow(res[0].as_array())
+#    plt.title('PDHG solution')
+#    plt.colorbar()
+#
+#    plt.subplot(3,1,2)
+#    plt.imshow(u.value)
+#    plt.title('CVX solution')
+#    plt.colorbar()
+#
+#    plt.subplot(3,1,3)
+#    plt.imshow(diff_cvx)
+#    plt.title('Difference')
+#    plt.colorbar()
+#    plt.show()
+#
+#    plt.plot(np.linspace(0,N,N), res[0].as_array()[int(N/2),:], label = 'PDHG')
+#    plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX')
+#    plt.legend()
+#
+#    print('Primal Objective (CVX) {} '.format(obj.value))
+#    print('Primal Objective (PDHG) {} '.format(primal[-1]))
+#    print('Min/Max of absolute difference {}/{}'.format(diff_cvx.min(), diff_cvx.max()))
+#
diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py
index 2713cfd..91c48c7 100644
--- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py
+++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py
@@ -8,16 +8,16 @@ Created on Fri Feb 22 14:53:03 2019
 
 @author: evangelos
 """
-from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer, AcquisitionGeometry, AcquisitionData
+from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, AcquisitionData
 
 import numpy as np
 import matplotlib.pyplot as plt
 
 from ccpi.optimisation.algorithms import PDHG, PDHG_old
 
-from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
+from ccpi.optimisation.operators import BlockOperator, Gradient
 from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
-                      MixedL21Norm, BlockFunction, ScaledFunction
+                      MixedL21Norm, BlockFunction
 
 from ccpi.astra.ops import AstraProjectorSimple
 from skimage.util import random_noise
```
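A note on the `PDHG.update_objective` hunk above: the added `p1` line records the primal objective next to the already-computed dual value `d1`, which is what lets the primal-dual gap be monitored (the same `p1`/`d1` pair that `PDHG_old` now prints every 50 iterations instead of every 10). As a worked equation, this is standard PDHG bookkeeping (a sketch, not quoted from the framework's documentation):

```latex
% PDHG solves  min_x f(Kx) + g(x), with K = `operator` in the scripts.
\begin{align*}
p(x) &= f(Kx) + g(x)                            && \text{primal objective, p1}\\
d(y) &= -\bigl(f^{*}(y) + g^{*}(-K^{*}y)\bigr)  && \text{dual objective, d1}\\
\operatorname{gap}(x,y) &= p(x) - d(y) \;\ge\; 0 && \text{vanishes at a saddle point}
\end{align*}
```

A shrinking gap is therefore a practical convergence check for both the class-based `PDHG` and `PDHG_old`.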
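The `SymmetrizedGradient.norm` hunk replaces the hardcoded bound `numpy.sqrt(4*self.gm_domain.shape[0])` with a 50-iteration power-method estimate via `LinearOperator.PowerMethod`. Below is a minimal sketch of that idea, using a plain NumPy matrix in place of the framework's operator and container types; the helper name `power_method_norm` is hypothetical, not the library API:

```python
import numpy as np

def power_method_norm(K, n_iter=50, seed=0):
    """Estimate the spectral norm ||K||_2 = sqrt(lambda_max(K^T K))
    by repeatedly applying K^T K to a random starting vector."""
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(K.shape[1])
    x /= np.linalg.norm(x)
    s = 0.0
    for _ in range(n_iter):
        y = K.T @ (K @ x)               # one application of K^T K
        s = np.sqrt(np.linalg.norm(y))  # ||K^T K x|| -> lambda_max, so sqrt -> sigma_max
        x = y / np.linalg.norm(y)       # renormalise for the next iteration
    return s

K = np.array([[1.0, 2.0], [3.0, 4.0]])
print(power_method_norm(K), np.linalg.norm(K, 2))  # the two values should agree closely
```

The norm matters because the scripts derive the PDHG step sizes from it (`tau = 1/(sigma*normK**2)`): an overestimate merely slows convergence, while an underestimate can violate the step-size condition and destabilise the iteration.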
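Finally, on the TGV example the commit message refers to: the PDHG script and the commented-out CVX cross-check both encode second-order TGV denoising, and the symmetrized gradient E in the model is exactly what `SymmetrizedGradient.direct` computes with the corrected `out.fill( 0.5 * (out + out1) )` line. As a sketch in the usual notation, with `alpha`, `beta` and the noisy image b = `noisy_data` taken from the script:

```latex
% Second-order TGV-regularised denoising, matching the CVX regulariser above.
\min_{u,\,w}\; \tfrac{1}{2}\|u - b\|_2^2
  + \alpha\,\|\nabla u - w\|_{2,1}
  + \beta\,\|\mathcal{E}w\|_{2,1},
\qquad
\mathcal{E}w = \tfrac{1}{2}\bigl(\nabla w + (\nabla w)^{\mathrm{T}}\bigr)
```

In the CVX block, `DX`/`DY` discretise the gradient, and the two identical `0.5 * (...)` rows are the symmetric off-diagonal entry of Ew, counted twice so the stacked vector carries the full tensor norm.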