Diffstat (limited to 'Wrappers/Python')
11 files changed, 521 insertions, 114 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index e9bd801..3b81d98 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -111,7 +111,7 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
     x = x_old
     y_tmp = y_old
-    y = y_tmp
+    y = y_old
 
     # relaxation parameter
     theta = 1
@@ -125,35 +125,82 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
 
     for i in range(niter):
-#        # Gradient descent, Dual problem solution
-#        y_tmp = y_old + sigma * operator.direct(xbar)
-        y_tmp = operator.direct(xbar)
-        y_tmp *= sigma
-        y_tmp +=y_old
-
-        y = f.proximal_conjugate(y_tmp, sigma)
-
-        # Gradient ascent, Primal problem solution
-#        x_tmp = x_old - tau * operator.adjoint(y)
-
-        x_tmp = operator.adjoint(y)
-        x_tmp *=-tau
-        x_tmp +=x_old
+        if not memopt:
+
+            y_tmp = y_old + sigma * operator.direct(xbar)
+            y = f.proximal_conjugate(y_tmp, sigma)
+
+            x_tmp = x_old - tau * operator.adjoint(y)
+            x = g.proximal(x_tmp, tau)
+
+            xbar = x + theta * (x - x_old)
+
+            x_old = x
+            y_old = y
 
-        x = g.proximal(x_tmp, tau)
-        #Update
-#        xbar = x + theta * (x - x_old)
-        xbar = x - x_old
-        xbar *= theta
-        xbar += x
-
-        x_old = x
-        y_old = y
-
-#        operator.direct(xbar, out = y_tmp)
+        else:
+
+#            operator.direct(xbar, out = y_tmp)
+#            y_tmp.__imul__(sigma)
+#            y_tmp.__iadd__(y_old)
+
+#            y_tmp *= sigma
+#            y_tmp += y_old
+
+            y_tmp = y_old + sigma * operator.direct(xbar)
+            f.proximal_conjugate(y_tmp, sigma, out=y)
+
+            x_tmp = x_old - tau * operator.adjoint(y)
+
+#            operator.adjoint(y, out = x_tmp)
+#            z = x_tmp
+#            x_tmp = x_old - tau * z
+
+#            x_tmp *= -tau
+#            x_tmp += x_old
+
+            g.proximal(x_tmp, tau, out = x)
+
+            xbar = x - x_old
+            xbar *= theta
+            xbar += x
+
+
+
+#        pass
+#
+##        # Gradient descent, Dual problem solution
+##        y_tmp = y_old + sigma * operator.direct(xbar)
+#        y_tmp = operator.direct(xbar)
 #        y_tmp *= sigma
-#        y_tmp +=y_old
+#        y_tmp +=y_old
+#
+#        y = f.proximal_conjugate(y_tmp, sigma)
+##        f.proximal_conjugate(y_tmp, sigma, out = y)
+#
+#        # Gradient ascent, Primal problem solution
+##        x_tmp = x_old - tau * operator.adjoint(y)
+#
+#        x_tmp = operator.adjoint(y)
+#        x_tmp *=-tau
+#        x_tmp +=x_old
+#
+#        x = g.proximal(x_tmp, tau)
+##        g.proximal(x_tmp, tau, out = x)
+#
+#        #Update
+##        xbar = x + theta * (x - x_old)
+#        xbar = x - x_old
+#        xbar *= theta
+#        xbar += x
+#
+#        x_old = x
+#        y_old = y
+#
+##        operator.direct(xbar, out = y_tmp)
+##        y_tmp *= sigma
+##        y_tmp +=y_old
diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
index 81c16cd..a74a215 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
@@ -13,6 +13,7 @@ from ccpi.framework import BlockDataContainer
 from numbers import Number
 
 class BlockFunction(Function):
+    
     '''A Block vector of Functions
 
     .. math::
@@ -52,15 +53,29 @@ class BlockFunction(Function):
 
     def proximal_conjugate(self, x, tau, out = None):
         '''proximal_conjugate does not take into account the BlockOperator'''
-        out = [None]*self.length
-        if isinstance(tau, Number):
-            for i in range(self.length):
-                out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
+        
+        if out is None:
+            
+            out = [None]*self.length
+            if isinstance(tau, Number):
+                for i in range(self.length):
+                    out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
+            else:
+                for i in range(self.length):
+                    out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i))
+            
+            return BlockDataContainer(*out)
+        
         else:
-            for i in range(self.length):
-                out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i))
-        
-        return BlockDataContainer(*out)
+            
+            if isinstance(tau, Number):
+                for i in range(self.length):
+                    self.functions[i].proximal_conjugate(x.get_item(i), tau, out = out.get_item(i))
+            else:
+                for i in range(self.length):
+                    self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i), out = out)
+            
+    
     def proximal(self, x, tau, out = None):
         '''proximal does not take into account the BlockOperator'''
@@ -76,4 +91,90 @@ class BlockFunction(Function):
 
     def gradient(self,x, out=None):
         '''FIXME: gradient returns pass'''
-        pass
\ No newline at end of file
+        pass
+    
+    
+if __name__ == '__main__':
+    
+    M, N, K = 2,3,5
+    
+    from ccpi.optimisation.functions import L2NormSquared, MixedL21Norm
+    from ccpi.framework import ImageGeometry, BlockGeometry
+    from ccpi.optimisation.operators import Gradient, Identity, BlockOperator
+    import numpy
+    
+    
+    ig = ImageGeometry(M, N)
+    BG = BlockGeometry(ig, ig)
+    
+    u = ig.allocate('random_int')
+    B = BlockOperator( Gradient(ig), Identity(ig) )
+    
+    U = B.direct(u)
+    b = ig.allocate('random_int')
+    
+    f1 = 10 * MixedL21Norm()
+    f2 = 0.5 * L2NormSquared(b=b)
+    
+    f = BlockFunction(f1, f2)
+    tau = 0.3
+    
+    print( " without out " )
+    res_no_out = f.proximal_conjugate( U, tau)
+    res_out = B.range_geometry().allocate()
+    f.proximal_conjugate( U, tau, out = res_out)
+    
+    numpy.testing.assert_array_almost_equal(res_no_out[0].as_array(), \
+                                            res_out[0].as_array(), decimal=4)
+    
+    
+    
+    
+    
+    
+    
+    ##########################################################################
+    
+    
+    
+    
+    
+    
+    
+#    zzz = B.range_geometry().allocate('random_int')
+#    www = B.range_geometry().allocate()
+#    www.fill(zzz)
+    
+#    res[0].fill(z)
+    
+    
+    
+    
+#    f.proximal_conjugate(z, sigma, out = res)
+    
+#    print(z1[0][0].as_array())
+#    print(res[0][0].as_array())
+    
+    
+    
+    
+#    U = BG.allocate('random_int')
+#    RES = BG.allocate()
+#    f = BlockFunction(f1, f2)
+#    
+#    z = f.proximal_conjugate(U, 0.2)
+#    f.proximal_conjugate(U, 0.2, out = RES)
+#    
+#    print(z[0].as_array())
+#    print(RES[0].as_array())
+#    
+#    print(z[1].as_array())
+#    print(RES[1].as_array())
+    
+    
+    
+    
+    
+    
+    
+
\ No newline at end of file
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index f96c7a1..d5e527a 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -121,12 +121,12 @@ class L2NormSquared(Function):
                 # change the order cannot add ImageData + NestedBlock
                 return (x - tau*self.b)/(1 + tau/2)
             else:
-                return x/(1 + tau/2 )
+                return x/(1 + tau/2)
         else:
             if self.b is not None:
-                out.fill((x - tau*self.b)/(1 + tau/2))
+                out.fill( (x - tau*self.b)/(1 + tau/2) )
             else:
-                out.fill(x/(1 + tau/2 ))
+                out.fill( x/(1 + tau/2) )
 
     def __rmul__(self, scalar):
         return ScaledFunction(self, scalar)
@@ -227,7 +227,29 @@ if __name__ == '__main__':
                                             (u/(1 + tau/(2*scalar) )).as_array(), decimal=4)
     
     numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \
-                                            ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4)
+                                            ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4)
+    
+    
+    
+    print( " ####### check without out ######### " )
+    
+    
+    u_out_no_out = ig.allocate('random_int')
+    res_no_out = f_scaled_data.proximal_conjugate(u_out_no_out, 0.5)
+    print(res_no_out.as_array())
+    
+    print( " ####### check with out ######### " )
+    
+    res_out = ig.allocate()
+    f_scaled_data.proximal_conjugate(u_out_no_out, 0.5, out = res_out)
+    
+    print(res_out.as_array())
+    
+    numpy.testing.assert_array_almost_equal(res_no_out.as_array(), \
+                                            res_out.as_array(), decimal=4)
+    
+    
+    
diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
index 4266e51..dd463c0 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
@@ -78,26 +78,44 @@ class MixedL21Norm(Function):
 
     def proximal_conjugate(self, x, tau, out=None):
+        
+        
         if self.SymTensor:
-            param = [1]*x.shape[0]
-            param[-1] = 2
-            tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])]
-            frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])]
-            res = BlockDataContainer(*frac)
-            
-            return res
+            if out is None:
+                param = [1]*x.shape[0]
+                param[-1] = 2
+                tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])]
+                frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])]
+                res = BlockDataContainer(*frac)
+                return res
+            else:
+                pass
+            
 #            tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha
 #            res = x.divide(ImageData(tmp2).maximum(1.0))
         else:
+            
+            if out is None:
+                
                 tmp = [ el*el for el in x]
                 res = (sum(tmp).sqrt()).maximum(1.0)
                 frac = [x[i]/res for i in range(x.shape[0])]
                 res = BlockDataContainer(*frac)
-            
-            return res
+                
+                return res
+            
+            else:
+                
+                tmp = [ el*el for el in x]
+                res = (sum(tmp).sqrt()).maximum(1.0)
+                frac = [x[i]/res for i in range(x.shape[0])]
+#                res = (sum(x**2).sqrt()).maximum(1.0)
+#                return x/res
+                out.fill(frac)
+            
+    
     def __rmul__(self, scalar):
         return ScaledFunction(self, scalar)
@@ -111,11 +129,14 @@ class MixedL21Norm(Function):
 if __name__ == '__main__':
     
     M, N, K = 2,3,5
-    ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N)
-    u1 = ig.allocate('random_int')
-    u2 = ig.allocate('random_int')
+    from ccpi.framework import BlockGeometry
+    import numpy
     
-    U = BlockDataContainer(u1, u2, shape=(2,1))
+    ig = ImageGeometry(M, N)
+    
+    BG = BlockGeometry(ig, ig)
+    
+    U = BG.allocate('random_int')
     
     # Define no scale and scaled
     f_no_scaled = MixedL21Norm()
@@ -125,9 +146,31 @@ if __name__ == '__main__':
     
     a1 = f_no_scaled(U)
     a2 = f_scaled(U)
+    print(a1, 2*a2)
+    
+    
+    print( " ####### check without out ######### " )
+    
+    
+    u_out_no_out = BG.allocate('random_int')
+    res_no_out = f_scaled.proximal_conjugate(u_out_no_out, 0.5)
+    print(res_no_out[0].as_array())
+    
+    print( " ####### check with out ######### " )
+#
+    res_out = BG.allocate()
+    f_scaled.proximal_conjugate(u_out_no_out, 0.5, out = res_out)
+#
+    print(res_out[0].as_array())
+#
+    numpy.testing.assert_array_almost_equal(res_no_out[0].as_array(), \
+                                            res_out[0].as_array(), decimal=4)
+    
+    numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(), \
+                                            res_out[1].as_array(), decimal=4)
+#
     
-    z1 = f_no_scaled.proximal_conjugate(U, 1)
-    z2 = f_scaled.proximal_conjugate(U, 1)
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
index 046a4a6..9fcd4fc 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
@@ -61,7 +61,8 @@ class ScaledFunction(object):
         if out is None:
return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)
else:
- out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar))
+ self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out = out)
+ out *= self.scalar
def grad(self, x):
'''Alias of gradient(x,None)'''
@@ -89,3 +90,59 @@ class ScaledFunction(object):
             return self.function.proximal(x, tau*self.scalar)
else:
out.fill( self.function.proximal(x, tau*self.scalar) )
+
+if __name__ == '__main__':
+
+ from ccpi.optimisation.functions import L2NormSquared, MixedL21Norm
+ from ccpi.framework import ImageGeometry, BlockGeometry
+
+ M, N, K = 2,3,5
+ ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K)
+
+ u = ig.allocate('random_int')
+ b = ig.allocate('random_int')
+
+ BG = BlockGeometry(ig, ig)
+ U = BG.allocate('random_int')
+
+ f2 = 0.5 * L2NormSquared(b=b)
+ f1 = 30 * MixedL21Norm()
+ tau = 0.355
+
+ res_no_out1 = f1.proximal_conjugate(U, tau)
+ res_no_out2 = f2.proximal_conjugate(u, tau)
+
+
+# print( " ######## with out ######## ")
+ res_out1 = BG.allocate()
+ res_out2 = ig.allocate()
+
+ f1.proximal_conjugate(U, tau, out = res_out1)
+ f2.proximal_conjugate(u, tau, out = res_out2)
+
+
+ numpy.testing.assert_array_almost_equal(res_no_out1[0].as_array(), \
+                                            res_out1[0].as_array(), decimal=4)
+
+ numpy.testing.assert_array_almost_equal(res_no_out2.as_array(), \
+                                            res_out2.as_array(), decimal=4)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
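The ScaledFunction change above swaps the fill-based update for an in-place one: the wrapped function writes its proximal conjugate straight into out, which is then rescaled by the scalar, so no intermediate container is allocated. A minimal sketch of that pattern, using plain NumPy arrays in place of the ccpi DataContainer objects (scaled_proximal_conjugate, prox_conj_f and prox_conj_l2 below are illustrative stand-ins, not part of the framework API):

# Sketch of the "write into out, then scale in place" pattern used above in
# ScaledFunction.proximal_conjugate. NumPy arrays stand in for ccpi
# DataContainers; prox_conj_f is a stand-in for self.function.proximal_conjugate.
import numpy as np

def scaled_proximal_conjugate(prox_conj_f, scalar, x, tau, out=None):
    # prox of tau*(scalar*f)^* via prox of f^*:
    # scalar * prox_{(tau/scalar) f^*}(x / scalar)
    if out is None:
        return scalar * prox_conj_f(x / scalar, tau / scalar)
    prox_conj_f(x / scalar, tau / scalar, out=out)   # fill the caller's buffer
    out *= scalar                                    # rescale in place

def prox_conj_l2(x, t, out=None):
    # conjugate prox of f(x) = ||x||_2^2 (no data term), matching the
    # L2NormSquared expression above: x / (1 + t/2)
    res = x / (1.0 + t / 2.0)
    if out is None:
        return res
    out[...] = res

x = np.random.rand(4)
buf = np.empty_like(x)
scaled_proximal_conjugate(prox_conj_l2, 0.5, x, 0.355, out=buf)
assert np.allclose(buf, scaled_proximal_conjugate(prox_conj_l2, 0.5, x, 0.355))

The same identity, prox of tau*(scalar*f)^* equals scalar times the prox of (tau/scalar)*f^* at x/scalar, underlies both branches; the out branch only changes where the result is stored.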
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index c6a7f95..1d77510 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -139,11 +139,16 @@ class BlockOperator(Operator):
             for row in range(self.shape[0]):
                 for col in range(self.shape[1]):
                     if col == 0:
-                        self.get_item(row,col).direct(x_b.get_item(col), out=tmp.get_item(col))
+                        self.get_item(row,col).direct(
+                                                      x_b.get_item(col),
+                                                      out=out.get_item(row))
                     else:
-                        self.get_item(row,col).direct(x_b.get_item(col), out=out)
-                        out+=tmp
-
+                        a = out.get_item(row)
+                        self.get_item(row,col).direct(
+                                                      x_b.get_item(col),
+                                                      out=tmp.get_item(row))
+                        a += tmp.get_item(row)
+
     def adjoint(self, x, out=None):
         '''Adjoint operation for the BlockOperator
 
@@ -156,36 +161,72 @@ class BlockOperator(Operator):
         Raises: ValueError if the contained Operators are not linear
         '''
-        if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True):
+        if not self.is_linear():
             raise ValueError('Not all operators in Block are linear.')
         if not isinstance (x, BlockDataContainer):
             x_b = BlockDataContainer(x)
         else:
             x_b = x
         shape = self.get_output_shape(x_b.shape, adjoint=True)
-        res = []
-        for row in range(self.shape[1]):
-            for col in range(self.shape[0]):
-                if col == 0:
-                    prod = self.get_item(row, col).adjoint(x_b.get_item(col))
-                else:
-                    prod += self.get_item(row, col).adjoint(x_b.get_item(col))
-            res.append(prod)
-        if self.shape[1]==1:
-            return ImageData(*res)
+        if out is None:
+            res = []
+            for col in range(self.shape[1]):
+                for row in range(self.shape[0]):
+                    if row == 0:
+                        prod = self.get_item(row, col).adjoint(x_b.get_item(row))
+                    else:
+                        prod += self.get_item(row, col).adjoint(x_b.get_item(row))
+                res.append(prod)
+            if self.shape[1]==1:
+                return ImageData(*res)
+            else:
+                return BlockDataContainer(*res, shape=shape)
         else:
-            return BlockDataContainer(*res, shape=shape)
-
+            #tmp = self.domain_geometry().allocate()
+
+            for col in range(self.shape[1]):
+                for row in range(self.shape[0]):
+                    if row == 0:
+                        if issubclass(out.__class__, DataContainer):
+                            self.get_item(row, col).adjoint(
+                                                x_b.get_item(row),
+                                                out=out)
+                        else:
+                            op = self.get_item(row,col)
+                            self.get_item(row, col).adjoint(
+                                                x_b.get_item(row),
+                                                out=out.get_item(col))
+                    else:
+                        if issubclass(out.__class__, DataContainer):
+                            out += self.get_item(row,col).adjoint(
+                                                x_b.get_item(row))
+                        else:
+                            a = out.get_item(col)
+                            a += self.get_item(row,col).adjoint(
+                                                x_b.get_item(row),
+                                                )
+    def is_linear(self):
+        '''returns whether all the elements of the BlockOperator are linear'''
+        return functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True)
+
     def get_output_shape(self, xshape, adjoint=False):
-        sshape = self.shape[1]
-        oshape = self.shape[0]
+        '''returns the shape of the output BlockDataContainer
+        
+        A(N,M) direct u(M,1) -> N,1
+        A(N,M)^T adjoint u(N,1) -> M,1
+        '''
+        rows , cols = self.shape
+        xrows, xcols = xshape
+        if xcols != 1:
+            raise ValueError('BlockDataContainer cannot have more than 1 column')
         if adjoint:
-            sshape = self.shape[0]
-            oshape = self.shape[1]
-        if sshape != xshape[0]:
-            raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape))
-        return (oshape, xshape[-1])
-
+            if rows != xrows:
+                raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape))
+            return (cols,xcols)
+        if cols != xrows:
+            raise ValueError('Incompatible shapes {} {}'.format((rows,cols), xshape))
+        return (rows,xcols)
+
     def __rmul__(self, scalar):
         '''Defines the left multiplication with a scalar
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 54456cc..9c573cb 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -71,7 +71,7 @@ class Gradient(LinearOperator):
                 self.FD.direction=self.ind[i]
                 self.FD.adjoint(x.get_item(i), out = tmp)
 #                FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp)
-                out-=tmp
+                out+=tmp
         else:
             tmp = self.gm_domain.allocate()
             for i in range(x.shape[0]):
@@ -220,16 +220,38 @@ if __name__ == '__main__':
 #    u = G.domain_geometry().allocate('random_int')
     w = G.range_geometry().allocate('random_int')
+    
+    
+    print( "################ without out #############")
+    
+    print( (G.direct(u)*w).sum(), (u*G.adjoint(w)).sum() )
+    
+    
+    print( "################ with out #############")
+    
+    res = G.range_geometry().allocate()
+    res1 = G.domain_geometry().allocate()
+    G.direct(u, out = res)
+    G.adjoint(w, out = res1)
+    
+    print( (res*w).sum(), (u*res1).sum() )
+    
+    
 #
 #
-    res = G.range_geometry().allocate()
-#
-    G.direct(u, out=res)
-    z = G.direct(u)
-#
-    print(res[0].as_array())
-    print(z[0].as_array())
+#    res = G.range_geometry().allocate()
+##
+#    G.direct(u, out=res)
+#    z = G.direct(u)
+##
+#    print(res[0].as_array())
+#    print(z[0].as_array())
 #
+    
+    
+    
+    
 ##    LHS = (G.direct(u)*w).sum()
 ##    RHS = (u * G.adjoint(w)).sum()
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
index adcc6d9..0d5030c 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py
@@ -3,12 +3,10 @@ import numpy
 
 class ScaledOperator(object):
     '''ScaledOperator
-
     A class to represent the scalar multiplication of an Operator with a scalar.
     It holds an operator and a scalar. Basically it returns the multiplication
     of the result of direct and adjoint of the operator with the scalar.
     For the rest it behaves like the operator it holds.
-
     Args:
        operator (Operator): a Operator or LinearOperator
        scalar (Number): a scalar multiplier
@@ -28,10 +26,18 @@ class ScaledOperator(object):
         self.scalar = scalar
         self.operator = operator
     def direct(self, x, out=None):
-        return self.scalar * self.operator.direct(x, out=out)
+        if out is None:
+            return self.scalar * self.operator.direct(x, out=out)
+        else:
+            self.operator.direct(x, out=out)
+            out *= self.scalar
     def adjoint(self, x, out=None):
         if self.operator.is_linear():
-            return self.scalar * self.operator.adjoint(x, out=out)
+            if out is None:
+                return self.scalar * self.operator.adjoint(x, out=out)
+            else:
+                self.operator.adjoint(x, out=out)
+                out *= self.scalar
         else:
             raise TypeError('Operator is not linear')
     def norm(self):
@@ -40,3 +46,5 @@ class ScaledOperator(object):
         return self.operator.range_geometry()
     def domain_geometry(self):
         return self.operator.domain_geometry()
+    def is_linear(self):
+        return self.operator.is_linear()
\ No newline at end of file
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py
index d871ba0..feb09ee 100755
--- a/Wrappers/Python/wip/pdhg_TV_denoising.py
+++ b/Wrappers/Python/wip/pdhg_TV_denoising.py
@@ -24,7 +24,7 @@ from skimage.util import random_noise
 # ############################################################################
 # Create phantom for TV denoising
 
-N = 200
+N = 500
 data = np.zeros((N,N))
 data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
 data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
@@ -45,7 +45,7 @@ plt.show()
 alpha = 2
 
 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ")
-method = '1'
+method = '0'
 
 if method == '0':
@@ -83,14 +83,27 @@ print ("normK", normK)
 sigma = 1
 tau = 1/(sigma*normK**2)
 
-opt = {'niter':2000}
+opt = {'niter':100}
+opt1 = {'niter':100, 'memopt': True}
 
+res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
 res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
-
+
 plt.figure(figsize=(5,5))
 plt.imshow(res.as_array())
 plt.colorbar()
 plt.show()
+
+plt.figure(figsize=(5,5))
+plt.imshow(res1.as_array())
+plt.colorbar()
+plt.show()
+
+
+plt.figure(figsize=(5,5))
+plt.imshow(np.abs(res1.as_array()-res.as_array()))
+plt.colorbar()
+plt.show()
 
 #pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
 #pdhg.max_iteration = 2000
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py
index eb7eef4..cec9770 100644
--- a/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py
+++ b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py
@@ -34,7 +34,7 @@ ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
 ag = ig
 
 # Create noisy data. Add Gaussian noise
-n1 = random_noise(data, mode = 's&p', salt_vs_pepper = 0.9)
+n1 = random_noise(data, mode = 's&p', salt_vs_pepper = 0.9, amount=0.2)
 noisy_data = ImageData(n1)
 
 plt.imshow(noisy_data.as_array())
@@ -44,10 +44,10 @@ plt.show()
 #%%
 
 # Regularisation Parameter
-alpha = 10
+alpha = 2
 
 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ")
-method = '1'
+method = '0'
 
 if method == '0':
 
     # Create operators
@@ -78,15 +78,27 @@ else:
     ###########################################################################
 #%%
 
-# Compute operator Norm
-normK = operator.norm()
-print ("normK", normK)
-# Primal & dual stepsizes
-#sigma = 1
-#tau = 1/(sigma*normK**2)
-
-sigma = 1/normK
-tau = 1/normK
+diag_precon = True
+
+if diag_precon:
+    
+    def tau_sigma_precond(operator):
+        
+        tau = 1/operator.sum_abs_row()
+        sigma = 1/ operator.sum_abs_col()
+        
+        return tau, sigma
+    
+    tau, sigma = tau_sigma_precond(operator)
+    
+else:
+    # Compute operator Norm
+    normK = operator.norm()
+    print ("normK", normK)
+    # Primal & dual stepsizes
+    sigma = 1/normK
+    tau = 1/normK
+#    tau = 1/(sigma*normK**2)
 
 opt = {'niter':2000}
diff --git a/Wrappers/Python/wip/test_profile.py b/Wrappers/Python/wip/test_profile.py
index 7be19f9..a97ad8d 100644
--- a/Wrappers/Python/wip/test_profile.py
+++ b/Wrappers/Python/wip/test_profile.py
@@ -9,18 +9,59 @@ Created on Mon Apr 8 13:57:46 2019
 # profile direct, adjoint, gradient
 
 from ccpi.framework import ImageGeometry
-from ccpi.optimisation.operators import Gradient
+from ccpi.optimisation.operators import Gradient, BlockOperator, Identity
 
-N, M = 500, 500
+N, M, K = 200, 300, 100
 
-ig = ImageGeometry(N, M)
+ig = ImageGeometry(N, M, K)
 G = Gradient(ig)
+Id = Identity(ig)
 
 u = G.domain_geometry().allocate('random_int')
 w = G.range_geometry().allocate('random_int')
 
-for i in range(500):
+
+res = G.range_geometry().allocate()
+res1 = G.domain_geometry().allocate()
+#
+#
+#LHS = (G.direct(u)*w).sum()
+#RHS = (u * G.adjoint(w)).sum()
+#
+#print(G.norm())
+#print(LHS, RHS)
+#
+##%%%re
+##
+#G.direct(u, out=res)
+#G.adjoint(w, out=res1)
+##
+#LHS1 = (res * w).sum()
+#RHS1 = (u * res1).sum()
+##
+#print(LHS1, RHS1)
 
+B = BlockOperator(2*G, 3*Id)
+uB = B.domain_geometry().allocate('random_int')
+resB = B.range_geometry().allocate()
 
+#z2 = B.direct(uB)
+#B.direct(uB, out = resB)
+
+#%%
+
+for i in range(100):
+#
+#    z2 = B.direct(uB)
+#
+    B.direct(uB, out = resB)
+    
+#    z1 = G.adjoint(w)
+#    z = G.direct(u)
 
+#    G.adjoint(w, out=res1)
+    
+#    G.direct(u, out=res)
-    res = G.adjoint(w)
\ No newline at end of file
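The salt-and-pepper script above replaces operator-norm step sizes with the diagonal preconditioning of Pock and Chambolle (2011), building tau and sigma from absolute column and row sums of the operator. A minimal sketch with a dense NumPy matrix standing in for the ccpi BlockOperator (the exact mapping onto the framework's sum_abs_row / sum_abs_col methods is assumed, not taken from this diff):

# Sketch of diagonally preconditioned PDHG step sizes (Pock & Chambolle, 2011):
# tau_j = 1 / sum_i |K_ij| (one entry per column / primal variable),
# sigma_i = 1 / sum_j |K_ij| (one entry per row / dual variable).
# A small dense matrix K stands in for the ccpi BlockOperator.
import numpy as np

def diag_precond_steps(K):
    tau = 1.0 / np.abs(K).sum(axis=0)     # column sums -> primal steps
    sigma = 1.0 / np.abs(K).sum(axis=1)   # row sums -> dual steps
    return tau, sigma

# finite-difference-like operator acting on 3 unknowns
K = np.array([[1.0, -1.0,  0.0],
              [0.0,  1.0, -1.0]])
tau, sigma = diag_precond_steps(K)

# check the preconditioned step-size condition ||S^(1/2) K T^(1/2)||_2 <= 1
T = np.diag(np.sqrt(tau))
S = np.diag(np.sqrt(sigma))
print(np.linalg.norm(S @ K @ T, 2))   # 1.0 here, i.e. the bound is tight

With these step sizes the preconditioned PDHG condition holds without ever computing operator.norm(), which is why the script skips the normK calculation when diag_precon is True.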