author     epapoutsellis <epapoutsellis@gmail.com>   2019-04-30 11:13:46 +0100
committer  epapoutsellis <epapoutsellis@gmail.com>   2019-04-30 11:13:46 +0100
commit     5cea311e7364550943b10a0ff04bb11f6fc5c0e1 (patch)
tree       2c99da76a9da99d2e555ea29726d09b730611d64 /Wrappers
parent     98fe6b31fff9dcb212e83b96d0453743c6e4edd5 (diff)
try fix conda
Diffstat (limited to 'Wrappers')
30 files changed, 1085 insertions, 512 deletions
diff --git a/Wrappers/Python/build/lib/ccpi/contrib/__init__.py b/Wrappers/Python/build/lib/ccpi/contrib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Wrappers/Python/build/lib/ccpi/contrib/__init__.py
diff --git a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/__init__.py b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/__init__.py
diff --git a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/__init__.py b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/__init__.py
diff --git a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/spdhg.py b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/spdhg.py
new file mode 100644
index 0000000..263a7cd
--- /dev/null
+++ b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/spdhg.py
@@ -0,0 +1,338 @@
+# Copyright 2018 Matthias Ehrhardt, Edoardo Pasca
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy
+
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import ImageData
+from ccpi.framework import AcquisitionData
+
+
+class spdhg():
+ """Computes a saddle point with a stochastic PDHG.
+
+ This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that
+
+ (x*, y*) in arg min_x max_y sum_i=1^n <y_i, A_i x> - f*[i](y_i) + g(x)
+
+ where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and
+ proper functionals. For this algorithm, they all may be non-smooth and no
+ strong convexity is assumed.
+
+ Parameters
+ ----------
+ f : list of functions
+ Functionals Y[i] -> IR_infty that all have a convex conjugate with a
+ proximal operator, i.e.
+ f[i].convex_conj.prox(sigma[i]) : Y[i] -> Y[i].
+ g : function
+ Functional X -> IR_infty that has a proximal operator, i.e.
+ g.prox(tau) : X -> X.
+ A : list of operators
+ Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint
+ x : primal variable, optional
+ By default equals 0.
+ y : dual variable, optional
+ Part of a product space. By default equals 0.
+ z : variable, optional
+ Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0.
+ tau : scalar / vector / matrix, optional
+ Step size for primal variable. Note that the proximal operator of g
+ has to be well-defined for this input.
+ sigma : scalar / vector / matrix, optional
+ Step size for the dual variable. Note that
+ the proximal operator related to f (see above) has to be well-defined
+ for this input.
+ prob : list of scalars, optional
+ Probabilities prob[i] that a subset i is selected in each iteration.
+ If fun_select is not given, then the sum of all probabilities must
+ equal 1.
+ A_norms : list of scalars, optional
+ Norms of the operators in A. Can be used to determine the step sizes
+ tau and sigma and the probabilities prob.
+ fun_select : function, optional
+ Function that selects blocks at every iteration IN -> {1,...,n}. By
+ default this is serial sampling, fun_select(k) selects an index
+ i \in {1,...,n} with probability prob[i].
+
+ References
+ ----------
+ [CERS2018] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
+ *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
+ and Imaging Applications*. SIAM Journal on Optimization, 28(4), 2783-2808
+ (2018) https://doi.org/10.1137/17M1134834
+
+ [E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott,
+ A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a
+ stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII,
+ 58 (2017) http://doi.org/10.1117/12.2272946.
+
+ [EMS2018] M. J. Ehrhardt, P. J. Markiewicz and C.-B. Schoenlieb, *Faster
+ PET Reconstruction with Non-Smooth Priors by Randomization and
+ Preconditioning*. (2018) ArXiv: http://arxiv.org/abs/1808.07150
+ """
+
+ def __init__(self, f, g, A, x=None, y=None, z=None, tau=None, sigma=None,
+ prob=None, A_norms=None, fun_select=None):
+ # fun_select is optional and by default performs serial sampling
+
+ if x is None:
+ x = A[0].allocate_direct(0)
+
+ if y is None:
+ if z is not None:
+ raise ValueError('y and z have to be defaulted together')
+
+ y = [Ai.allocate_adjoint(0) for Ai in A]
+ z = 0 * x.copy()
+
+ else:
+ if z is None:
+ raise ValueError('y and z have to be defaulted together')
+
+ if A_norms is not None:
+ if tau is not None or sigma is not None or prob is not None:
+ raise ValueError('Either A_norms or (tau, sigma, prob) must '
+ 'be given, not both')
+
+ tau = 1 / sum(A_norms)
+ sigma = [1 / nA for nA in A_norms]
+ prob = [nA / sum(A_norms) for nA in A_norms]
+
+ #uniform prob, needs different sigma and tau
+ #n = len(A)
+ #prob = [1./n] * n
+
+ if fun_select is None:
+ if prob is None:
+ raise ValueError('prob was not determined')
+
+ def fun_select(k):
+ return [int(numpy.random.choice(len(A), 1, p=prob))]
+
+ self.iter = 0
+ self.x = x
+
+ self.y = y
+ self.z = z
+
+ self.f = f
+ self.g = g
+ self.A = A
+ self.tau = tau
+ self.sigma = sigma
+ self.prob = prob
+ self.fun_select = fun_select
+
+ # Initialize variables
+ self.z_relax = z.copy()
+ self.tmp = self.x.copy()
+
+ def update(self):
+ # select block
+ selected = self.fun_select(self.iter)
+
+ # update primal variable
+ #tmp = (self.x - self.tau * self.z_relax).as_array()
+ #self.x.fill(self.g.prox(tmp, self.tau))
+ self.tmp = - self.tau * self.z_relax
+ self.tmp += self.x
+ self.x = self.g.prox(self.tmp, self.tau)
+
+ # update dual variable and z, z_relax
+ self.z_relax = self.z.copy()
+ for i in selected:
+ # save old yi
+ y_old = self.y[i].copy()
+
+ # y[i]= prox(tmp)
+ tmp = y_old + self.sigma[i] * self.A[i].direct(self.x)
+ self.y[i] = self.f[i].convex_conj.prox(tmp, self.sigma[i])
+
+ # update adjoint of dual variable
+ dz = self.A[i].adjoint(self.y[i] - y_old)
+ self.z += dz
+
+ # compute extrapolation
+ self.z_relax += (1 + 1 / self.prob[i]) * dz
+
+ self.iter += 1
+
+
+## Functions
+
+class KullbackLeibler(Function):
+ def __init__(self, data, background):
+ self.data = data
+ self.background = background
+ self.__offset = None
+
+ def __call__(self, x):
+ """Return the KL-diveregnce in the point ``x``.
+
+ If any components of ``x`` is non-positive, the value is positive
+ infinity.
+
+ Needs one extra array of memory of the size of `prior`.
+ """
+
+ # define short variable names
+ y = self.data
+ r = self.background
+
+ # Compute
+ # sum(x + r - y + y * log(y / (x + r)))
+ # = sum(x - y * log(x + r)) + self.offset
+ # Assume that
+ # x + r > 0
+
+ # sum the result up
+ obj = numpy.sum(x - y * numpy.log(x + r)) + self.offset()
+
+ if numpy.isnan(obj):
+ # In this case, some element was less than or equal to zero
+ return numpy.inf
+ else:
+ return obj
+
+ @property
+ def convex_conj(self):
+ """The convex conjugate functional of the KL-functional."""
+ return KullbackLeiblerConvexConjugate(self.data, self.background)
+
+ def offset(self):
+ """The offset which is independent of the unknown."""
+
+ if self.__offset is None:
+ # define short variable names
+ y = self.data
+ r = self.background
+
+ # work with numpy directly: this class defines no `domain`
+ # attribute, so the previous self.domain.element(...) calls could not run
+ tmp = numpy.maximum(y, 1)
+ tmp = r - y + y * numpy.log(tmp)
+
+ # sum the result up
+ self.__offset = numpy.sum(tmp)
+
+ return self.__offset
+
+# def __repr__(self):
+# """to be added???"""
+# """Return ``repr(self)``."""
+ # return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__,
+ ## self.domain, self.data,
+ # self.background)
+
+
+class KullbackLeiblerConvexConjugate(Function):
+ """The convex conjugate of Kullback-Leibler divergence functional.
+
+ Notes
+ -----
+ The functional :math:`F^*` with data :math:`y` and background
+ :math:`r > 0` is given by:
+
+ .. math::
+ F^*(x)
+ =
+ \\begin{cases}
+ \\sum_{i} \\left( -r_i x_i - y_i \\ln(1 - x_i) \\right)
+ & \\text{if } x_i < 1 \\forall i
+ \\\\
+ +\\infty & \\text{else}
+ \\end{cases}
+
+ See Also
+ --------
+ KullbackLeibler : convex conjugate functional
+ """
+
+ def __init__(self, data, background):
+ self.data = data
+ self.background = background
+
+ def __call__(self, x):
+ y = self.data
+ r = self.background
+
+ tmp = numpy.sum(- x * r - y * numpy.log(1 - x))
+
+ if numpy.isnan(tmp):
+ # In this case, some element was larger than or equal to one
+ return numpy.inf
+ else:
+ return tmp
+
+
+ def prox(self, x, tau, out=None):
+ # Let y = data, r = background, z = x + tau * r
+ # Compute 0.5 * (z + 1 - sqrt((z - 1)**2 + 4 * tau * y))
+ # Currently it needs 3 extra copies of memory.
+
+ if out is None:
+ out = x.copy()
+
+ # define short variable names
+ try: # this should be standard SIRF/CIL mode
+ y = self.data.as_array()
+ r = self.background.as_array()
+ x = x.as_array()
+
+ try:
+ taua = tau.as_array()
+ except AttributeError:
+ taua = tau
+
+ z = x + taua * r
+
+ out.fill(0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y)))
+
+ return out
+
+ except AttributeError: # e.g. for NumPy
+ y = self.data
+ r = self.background
+
+ try:
+ taua = tau.as_array()
+ except AttributeError:
+ taua = tau
+
+ z = x + taua * r
+
+ out[:] = 0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y))
+
+ return out
+
+ @property
+ def convex_conj(self):
+ return KullbackLeibler(self.data, self.background)
+
+
+def mult(x, y):
+ try:
+ xa = x.as_array()
+ except AttributeError:
+ xa = x
+
+ out = y.clone()
+ out.fill(xa * y.as_array())
+
+ return out
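A usage note on the spdhg class above — a minimal sketch, not part of the patch, assuming the ccpi package built from this tree is importable and that f1, f2, g, A1, A2 are hypothetical placeholders satisfying the interface documented in the class docstring (A[i].allocate_direct/allocate_adjoint, f[i].convex_conj.prox, g.prox):

from ccpi.contrib.optimisation.algorithms.spdhg import spdhg

# f1, f2, g, A1, A2 are placeholder functionals/operators with the
# documented interface; A_norms supplies the operator norms
algorithm = spdhg(f=[f1, f2], g=g, A=[A1, A2],
                  A_norms=[A1.get_max_sing_val(), A2.get_max_sing_val()])
for _ in range(100):
    algorithm.update()   # one randomly selected dual block per call
reconstruction = algorithm.x

Passing A_norms makes the constructor choose tau = 1/sum(A_norms), sigma[i] = 1/A_norms[i] and prob[i] = A_norms[i]/sum(A_norms), the serial sampling analysed in [CERS2018].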
diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py
index 386cd87..166014b 100644
--- a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py
+++ b/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py
@@ -270,6 +270,7 @@ class BlockDataContainer(object):
def squared_norm(self):
y = numpy.asarray([el.squared_norm() for el in self.containers])
return y.sum()
+
def norm(self):
return numpy.sqrt(self.squared_norm())
@@ -442,9 +443,12 @@ class BlockDataContainer(object):
'''Inline truedivision'''
return self.__idiv__(other)
+
+ def dot(self, other):
+#
+ tmp = [ self.containers[i].dot(other.containers[i]) for i in range(self.shape[0])]
+ return sum(tmp)
-
-
+
if __name__ == '__main__':
@@ -456,6 +460,7 @@ if __name__ == '__main__':
BG = BlockGeometry(ig, ig)
U = BG.allocate('random_int')
+ V = BG.allocate('random_int')
print ("test sum BDC " )
@@ -468,10 +473,10 @@ if __name__ == '__main__':
z1 = sum(U**2).sqrt().as_array()
numpy.testing.assert_array_equal(z, z1)
-
- z2 = U.pnorm(2)
+ zzz = U.dot(V)
+
diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py b/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py
index 5dd6750..ed44d99 100644
--- a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py
+++ b/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py
@@ -31,7 +31,50 @@ class BlockGeometry(object):
'''returns the Geometry in the BlockGeometry located at position index'''
return self.geometries[index]
- def allocate(self, value=0, dimension_labels=None):
+ def allocate(self, value=0, dimension_labels=None, **kwargs):
+
+ symmetry = kwargs.get('symmetry',False)
containers = [geom.allocate(value) for geom in self.geometries]
+
+ if symmetry == True:
+
+ # for 2x2
+ # [ ig11, ig12\
+ # ig21, ig22]
+
+ # Row-wise Order
+
+ if len(containers)==4:
+ containers[1]=containers[2]
+
+ # for 3x3
+ # [ ig11, ig12, ig13\
+ # ig21, ig22, ig23\
+ # ig31, ig32, ig33]
+
+ elif len(containers)==9:
+ containers[1]=containers[3]
+ containers[2]=containers[6]
+ containers[5]=containers[7]
+
+ # for 4x4
+ # [ ig11, ig12, ig13, ig14\
+ # ig21, ig22, ig23, ig24\
+ # ig31, ig32, ig33, ig34
+ # ig41, ig42, ig43, ig44]
+
+ elif len(containers) == 16:
+ containers[1]=containers[4]
+ containers[2]=containers[8]
+ containers[3]=containers[12]
+ containers[6]=containers[9]
+ containers[7]=containers[10]
+ containers[11]=containers[15]
+
+
+
+
return BlockDataContainer(*containers)
+
+
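A quick illustration of the symmetry flag added above — a minimal sketch, not part of the patch, assuming the ccpi package built from this tree: for a 2x2 block layout the off-diagonal containers are aliased, which is what the range of SymmetrizedGradient requires.

from ccpi.framework import ImageGeometry, BlockGeometry

ig = ImageGeometry(voxel_num_x=4, voxel_num_y=3)
BG = BlockGeometry(ig, ig, ig, ig)            # 2x2 blocks, row-wise order
W = BG.allocate('random_int', symmetry=True)
assert W.get_item(1) is W.get_item(2)         # ig12 and ig21 share one container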
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py index ed95c3f..12cbabc 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py @@ -145,13 +145,40 @@ class Algorithm(object): if self.should_stop(): print ("Stop cryterion has been reached.") i = 0 + +# print("Iteration {:<5} Primal {:<5} Dual {:<5} PDgap".format('','','')) for _ in self: - if verbose and self.iteration % self.update_objective_interval == 0: - print ("Iteration {}/{}, objective {}".format(self.iteration, - self.max_iteration, self.get_last_objective()) ) - else: + + + if self.iteration % self.update_objective_interval == 0: + if callback is not None: - callback(self.iteration, self.get_last_objective()) + callback(self.iteration, self.get_last_objective(), self.x) + + else: + + if verbose: + +# if verbose and self.iteration % self.update_objective_interval == 0: + #pass + # \t for tab +# print( "{:04}/{:04} {:<5} {:.4f} {:<5} {:.4f} {:<5} {:.4f}".\ +# format(self.iteration, self.max_iteration,'', \ +# self.get_last_objective()[0],'',\ +# self.get_last_objective()[1],'',\ +# self.get_last_objective()[2])) + + + print ("Iteration {}/{}, {}".format(self.iteration, + self.max_iteration, self.get_last_objective()) ) + + #print ("Iteration {}/{}, Primal, Dual, PDgap = {}".format(self.iteration, + # self.max_iteration, self.get_last_objective()) ) + + +# else: +# if callback is not None: +# callback(self.iteration, self.get_last_objective(), self.x) i += 1 if i == iterations: break diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py index 8ea2b6c..ee51049 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py @@ -65,10 +65,10 @@ class FISTA(Algorithm): # initialization if memopt: - self.y = x_init.clone() - self.x_old = x_init.clone() - self.x = x_init.clone() - self.u = x_init.clone() + self.y = x_init.copy() + self.x_old = x_init.copy() + self.x = x_init.copy() + self.u = x_init.copy() else: self.x_old = x_init.copy() self.y = x_init.copy() diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py index a41bd48..0f5e8ef 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py @@ -13,6 +13,7 @@ import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer from ccpi.optimisation.functions import FunctionOperatorComposition +import matplotlib.pyplot as plt class PDHG(Algorithm): '''Primal Dual Hybrid Gradient''' @@ -46,34 +47,43 @@ class PDHG(Algorithm): self.y_old = self.operator.range_geometry().allocate() self.xbar = self.x_old.copy() - + self.x_tmp = self.x_old.copy() self.x = self.x_old.copy() - self.y = self.y_old.copy() - if self.memopt: - self.y_tmp = self.y_old.copy() - self.x_tmp = self.x_old.copy() - #y = y_tmp + + self.y_tmp = self.y_old.copy() + self.y = self.y_tmp.copy() + + + + #self.y = self.y_old.copy() + + + #if self.memopt: + # self.y_tmp = self.y_old.copy() + # self.x_tmp = self.x_old.copy() + # relaxation parameter self.theta = 1 def update(self): + if self.memopt: # Gradient descent, Dual problem solution # self.y_old 
+= self.sigma * self.operator.direct(self.xbar) self.operator.direct(self.xbar, out=self.y_tmp) self.y_tmp *= self.sigma - self.y_old += self.y_tmp + self.y_tmp += self.y_old #self.y = self.f.proximal_conjugate(self.y_old, self.sigma) - self.f.proximal_conjugate(self.y_old, self.sigma, out=self.y) + self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y) # Gradient ascent, Primal problem solution self.operator.adjoint(self.y, out=self.x_tmp) - self.x_tmp *= self.tau - self.x_old -= self.x_tmp + self.x_tmp *= -1*self.tau + self.x_tmp += self.x_old - self.g.proximal(self.x_old, self.tau, out=self.x) + self.g.proximal(self.x_tmp, self.tau, out=self.x) #Update self.x.subtract(self.x_old, out=self.xbar) @@ -82,7 +92,8 @@ class PDHG(Algorithm): self.x_old.fill(self.x) self.y_old.fill(self.y) - + + else: # Gradient descent, Dual problem solution self.y_old += self.sigma * self.operator.direct(self.xbar) @@ -93,18 +104,28 @@ class PDHG(Algorithm): self.x = self.g.proximal(self.x_old, self.tau) #Update - #xbar = x + theta * (x - x_old) - self.xbar.fill(self.x) - self.xbar -= self.x_old + self.x.subtract(self.x_old, out=self.xbar) self.xbar *= self.theta self.xbar += self.x - self.x_old = self.x - self.y_old = self.y + self.x_old.fill(self.x) + self.y_old.fill(self.y) + + #xbar = x + theta * (x - x_old) +# self.xbar.fill(self.x) +# self.xbar -= self.x_old +# self.xbar *= self.theta +# self.xbar += self.x + +# self.x_old.fill(self.x) +# self.y_old.fill(self.y) + + def update_objective(self): + p1 = self.f(self.operator.direct(self.x)) + self.g(self.x) - d1 = -(self.f.convex_conjugate(self.y) + self.g(-1*self.operator.adjoint(self.y))) + d1 = -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(-1*self.operator.adjoint(self.y))) self.loss.append([p1,d1,p1-d1]) @@ -151,8 +172,8 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): if not memopt: - - y_tmp = y_old + sigma * operator.direct(xbar) + + y_tmp = y_old + sigma * operator.direct(xbar) y = f.proximal_conjugate(y_tmp, sigma) x_tmp = x_old - tau*operator.adjoint(y) @@ -161,20 +182,19 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x.subtract(x_old, out=xbar) xbar *= theta xbar += x + + if i%50==0: + + p1 = f(operator.direct(x)) + g(x) + d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) ) + primal.append(p1) + dual.append(d1) + pdgap.append(p1-d1) + print(p1, d1, p1-d1) x_old.fill(x) y_old.fill(y) - - - if i%10==0: -# - p1 = f(operator.direct(x)) + g(x) - print(p1) -# d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) ) -# primal.append(p1) -# dual.append(d1) -# pdgap.append(p1-d1) -# print(p1, d1, p1-d1) + else: @@ -184,7 +204,7 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): f.proximal_conjugate(y_tmp, sigma, out=y) operator.adjoint(y, out = x_tmp) - x_tmp *= -tau + x_tmp *= -1*tau x_tmp += x_old g.proximal(x_tmp, tau, out = x) @@ -192,19 +212,20 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x.subtract(x_old, out=xbar) xbar *= theta xbar += x - - x_old.fill(x) - y_old.fill(y) - if i%10==0: -# + if i%50==0: + p1 = f(operator.direct(x)) + g(x) - print(p1) -# d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) ) -# primal.append(p1) -# dual.append(d1) -# pdgap.append(p1-d1) -# print(p1, d1, p1-d1) + d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) ) + primal.append(p1) + dual.append(d1) + pdgap.append(p1-d1) + print(p1, d1, p1-d1) + + 
x_old.fill(x) + y_old.fill(y) + + t_end = time.time() diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py b/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py index efc465c..b2b9791 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ccpi.optimisation.ops import Identity, FiniteDiff2D import numpy from ccpi.framework import DataContainer import warnings @@ -99,12 +98,6 @@ class Norm2(Function): raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) -class TV2D(Norm2): - - def __init__(self, gamma): - super(TV2D,self).__init__(gamma, 0) - self.op = FiniteDiff2D() - self.L = self.op.get_max_sing_val() # Define a class for squared 2-norm diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py index bf627a5..0836324 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py @@ -93,16 +93,28 @@ class BlockFunction(Function): ''' + + if out is None: - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) + out = [None]*self.length + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(i)) + + return BlockDataContainer(*out) + else: - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(i)) - - return BlockDataContainer(*out) + if isinstance(tau, Number): + for i in range(self.length): + self.functions[i].proximal(x.get_item(i), tau, out[i]) + else: + for i in range(self.length): + self.functions[i].proximal(x.get_item(i), tau.get_item(i), out[i]) + + def gradient(self,x, out=None): diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py index 70511bb..8895f3d 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py @@ -19,16 +19,13 @@ class FunctionOperatorComposition(Function): ''' - def __init__(self, operator, function): + def __init__(self, function, operator): super(FunctionOperatorComposition, self).__init__() + self.function = function self.operator = operator - alpha = 1 - - if isinstance (function, ScaledFunction): - alpha = function.scalar - self.L = 2 * alpha * operator.norm()**2 + self.L = function.L * operator.norm()**2 def __call__(self, x): @@ -39,47 +36,57 @@ class FunctionOperatorComposition(Function): ''' - return self.function(self.operator.direct(x)) + return self.function(self.operator.direct(x)) + + def gradient(self, x, out=None): +# + ''' Gradient takes into account the Operator''' + if out is None: + return self.operator.adjoint(self.function.gradient(self.operator.direct(x))) + else: + tmp = self.operator.range_geometry().allocate() + self.operator.direct(x, out=tmp) + self.function.gradient(tmp, out=tmp) + self.operator.adjoint(tmp, out=out) - #TODO do not know if we need it - def 
call_adjoint(self, x): - return self.function(self.operator.adjoint(x)) + + #TODO do not know if we need it + #def call_adjoint(self, x): + # + # return self.function(self.operator.adjoint(x)) - def convex_conjugate(self, x): - - ''' convex_conjugate does not take into account the Operator''' - return self.function.convex_conjugate(x) - def proximal(self, x, tau, out=None): - - '''proximal does not take into account the Operator''' - if out is None: - return self.function.proximal(x, tau) - else: - self.function.proximal(x, tau, out=out) - + #def convex_conjugate(self, x): + # + # ''' convex_conjugate does not take into account the Operator''' + # return self.function.convex_conjugate(x) - def proximal_conjugate(self, x, tau, out=None): + - ''' proximal conjugate does not take into account the Operator''' - if out is None: - return self.function.proximal_conjugate(x, tau) - else: - self.function.proximal_conjugate(x, tau, out=out) - def gradient(self, x, out=None): - - ''' Gradient takes into account the Operator''' - if out is None: - return self.operator.adjoint( - self.function.gradient(self.operator.direct(x)) - ) - else: - self.operator.adjoint( - self.function.gradient(self.operator.direct(x), - out=out) - ) + +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry + from ccpi.optimisation.operators import Gradient + from ccpi.optimisation.functions import L2NormSquared + + M, N, K = 2,3 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N) + + G = Gradient(ig) + alpha = 0.5 + + f = L2NormSquared() + f_comp = FunctionOperatorComposition(G, alpha * f) + x = ig.allocate('random_int') + print(f_comp.gradient(x).shape + + ) + + +
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition_old.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition_old.py new file mode 100644 index 0000000..70511bb --- /dev/null +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition_old.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Mar 8 09:55:36 2019 + +@author: evangelos +""" + +from ccpi.optimisation.functions import Function +from ccpi.optimisation.functions import ScaledFunction + + +class FunctionOperatorComposition(Function): + + ''' Function composition with Operator, i.e., f(Ax) + + A: operator + f: function + + ''' + + def __init__(self, operator, function): + + super(FunctionOperatorComposition, self).__init__() + self.function = function + self.operator = operator + alpha = 1 + + if isinstance (function, ScaledFunction): + alpha = function.scalar + self.L = 2 * alpha * operator.norm()**2 + + + def __call__(self, x): + + ''' Evaluate FunctionOperatorComposition at x + + returns f(Ax) + + ''' + + return self.function(self.operator.direct(x)) + + #TODO do not know if we need it + def call_adjoint(self, x): + + return self.function(self.operator.adjoint(x)) + + + def convex_conjugate(self, x): + + ''' convex_conjugate does not take into account the Operator''' + return self.function.convex_conjugate(x) + + def proximal(self, x, tau, out=None): + + '''proximal does not take into account the Operator''' + if out is None: + return self.function.proximal(x, tau) + else: + self.function.proximal(x, tau, out=out) + + + def proximal_conjugate(self, x, tau, out=None): + + ''' proximal conjugate does not take into account the Operator''' + if out is None: + return self.function.proximal_conjugate(x, tau) + else: + self.function.proximal_conjugate(x, tau, out=out) + + def gradient(self, x, out=None): + + ''' Gradient takes into account the Operator''' + if out is None: + return self.operator.adjoint( + self.function.gradient(self.operator.direct(x)) + ) + else: + self.operator.adjoint( + self.function.gradient(self.operator.direct(x), + out=out) + ) + +
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/KullbackLeibler.py index 7889cad..b53f669 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/KullbackLeibler.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/KullbackLeibler.py @@ -20,7 +20,8 @@ import numpy from ccpi.optimisation.functions import Function from ccpi.optimisation.functions.ScaledFunction import ScaledFunction -from ccpi.framework import ImageData +from ccpi.framework import ImageData, ImageGeometry +import functools class KullbackLeibler(Function): @@ -37,16 +38,23 @@ class KullbackLeibler(Function): def __call__(self, x): - - # TODO check - tmp = x + self.bnoise - ind = tmp.as_array()>0 + res_tmp = numpy.zeros(x.shape) + + tmp = x + self.bnoise + ind = x.as_array()>0 + + res_tmp[ind] = x.as_array()[ind] - self.b.as_array()[ind] * numpy.log(tmp.as_array()[ind]) + + return res_tmp.sum() + + def log(self, datacontainer): + '''calculates the in-place log of the datacontainer''' + if not functools.reduce(lambda x,y: x and y>0, + datacontainer.as_array().ravel(), True): + raise ValueError('KullbackLeibler. Cannot calculate log of negative number') + datacontainer.fill( numpy.log(datacontainer.as_array()) ) - res = x.as_array()[ind] - self.b.as_array()[ind] * numpy.log(tmp.as_array()[ind]) - - return sum(res) - def gradient(self, x, out=None): @@ -54,27 +62,42 @@ class KullbackLeibler(Function): if out is None: return 1 - self.b/(x + self.bnoise) else: - self.b.divide(x+self.bnoise, out=out) + x.add(self.bnoise, out=out) + self.b.divide(out, out=out) out.subtract(1, out=out) - + out *= -1 + def convex_conjugate(self, x): - tmp = self.b/( 1 - x ) + tmp = self.b/(1-x) ind = tmp.as_array()>0 - sel - - return (self.b * ( ImageData( numpy.log(tmp) ) - 1 ) - self.bnoise * (x - 1)).sum() + return (self.b.as_array()[ind] * (numpy.log(tmp.as_array()[ind])-1)).sum() + def proximal(self, x, tau, out=None): if out is None: return 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() ) else: - tmp = 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() ) - out.fill(tmp) - + tmp = 0.5 *( (x - self.bnoise - tau) + + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() + ) + x.add(self.bnoise, out=out) + out -= tau + out *= out + tmp = self.b * (4 * tau) + out.add(tmp, out=out) + out.sqrt(out=out) + + x.subtract(self.bnoise, out=tmp) + tmp -= tau + + out += tmp + + out *= 0.5 + def proximal_conjugate(self, x, tau, out=None): @@ -82,10 +105,21 @@ class KullbackLeibler(Function): z = x + tau * self.bnoise return 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt()) else: - z = x + tau * self.bnoise - res = 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt()) - out.fill(res) +# z = x + tau * self.bnoise +# out.fill( 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt()) ) + + tmp1 = x + tau * self.bnoise - 1 + tmp2 = tmp1 + 2 + self.b.multiply(4*tau, out=out) + tmp1.multiply(tmp1, out=tmp1) + out += tmp1 + out.sqrt(out=out) + + out *= -1 + out += tmp2 + out *= 0.5 + def __rmul__(self, scalar): @@ -106,6 +140,7 @@ if __name__ == '__main__': from ccpi.framework import ImageGeometry import numpy + N, M = 2,3 ig = ImageGeometry(N, M) data = ImageData(numpy.random.randint(-10, 10, size=(M, N))) @@ -137,4 +172,4 @@ if __name__ == '__main__': # -
\ No newline at end of file + diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py index 6d3bf86..df38f15 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py @@ -19,6 +19,7 @@ from ccpi.optimisation.functions import Function from ccpi.optimisation.functions.ScaledFunction import ScaledFunction +from ccpi.optimisation.functions import FunctionOperatorComposition class L2NormSquared(Function): @@ -71,7 +72,7 @@ class L2NormSquared(Function): def convex_conjugate(self, x): ''' Evaluate convex conjugate of L2NormSquared at x: f^{*}(x)''' - + tmp = 0 if self.b is not None: @@ -93,21 +94,12 @@ class L2NormSquared(Function): if self.b is None: return x/(1+2*tau) else: -# tmp = x -# tmp -= self.b -# tmp /= (1+2*tau) -# tmp += self.b -# return tmp - return (x-self.b)/(1+2*tau) + self.b - -# if self.b is not None: -# out=x -# if self.b is not None: -# out -= self.b -# out /= (1+2*tau) -# if self.b is not None: -# out += self.b -# return out + + tmp = x.subtract(self.b) + tmp /= (1+2*tau) + tmp += self.b + return tmp + else: out.fill(x) if self.b is not None: @@ -145,7 +137,13 @@ class L2NormSquared(Function): ''' - return ScaledFunction(self, scalar) + return ScaledFunction(self, scalar) + + + def composition(self, operator): + + return FunctionOperatorComposition(operator) + if __name__ == '__main__': diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py index 2004e5f..e8f6da4 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py @@ -43,19 +43,9 @@ class MixedL21Norm(Function): ''' if not isinstance(x, BlockDataContainer): raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x))) - - if self.SymTensor: - - #TODO fix this case - param = [1]*x.shape[0] - param[-1] = 2 - tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] - res = sum(tmp).sqrt().sum() - - else: - - tmp = [ el**2 for el in x.containers ] - res = sum(tmp).sqrt().sum() + + tmp = [ el**2 for el in x.containers ] + res = sum(tmp).sqrt().sum() return res @@ -67,7 +57,12 @@ class MixedL21Norm(Function): ''' This is the Indicator function of ||\cdot||_{2, \infty} which is either 0 if ||x||_{2, \infty} or \infty ''' + return 0.0 + + #tmp = [ el**2 for el in x.containers ] + #print(sum(tmp).sqrt().as_array().max()) + #return sum(tmp).sqrt().as_array().max() def proximal(self, x, tau, out=None): @@ -80,35 +75,24 @@ class MixedL21Norm(Function): def proximal_conjugate(self, x, tau, out=None): - if self.SymTensor: - - param = [1]*x.shape[0] - param[-1] = 2 - tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] - frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - - return res - - else: - if out is None: - tmp = [ el*el for el in x.containers] - res = sum(tmp).sqrt().maximum(1.0) - frac = [el/res for el in x.containers] - return BlockDataContainer(*frac) + if out is None: + tmp = [ el*el for el in x.containers] + res = sum(tmp).sqrt().maximum(1.0) + frac = [el/res for el in x.containers] + return BlockDataContainer(*frac) + - - #TODO this is slow, why??? + #TODO this is slow, why??? 
# return x.divide(x.pnorm().maximum(1.0)) - else: - - res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) - res = res1.sqrt().maximum(1.0) - x.divide(res, out=out) - + else: + + res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) + res = res1.sqrt().maximum(1.0) + x.divide(res, out=out) + # x.divide(sum([el*el for el in x.containers]).sqrt().maximum(1.0), out=out) - #TODO this is slow, why ??? + #TODO this is slow, why ??? # x.divide(x.pnorm().maximum(1.0), out=out) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py index 7caeab2..1db223b 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py @@ -59,7 +59,8 @@ class ScaledFunction(object): if out is None:
return self.scalar * self.function.gradient(x)
else:
- out.fill( self.scalar * self.function.gradient(x) )
+ self.function.gradient(x, out=out)
+ out *= self.scalar
def proximal(self, x, tau, out=None):
'''This returns the proximal operator for the function at x, tau
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFunction.py index cce519a..a019815 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFunction.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFunction.py @@ -39,14 +39,8 @@ class ZeroFunction(Function): indicator function for the set = {0} So 0 if x=0, or inf if x neq 0 ''' + return x.maximum(0).sum() - if x.shape[0]==1: - return x.maximum(0).sum() - else: - if isinstance(x, BlockDataContainer): - return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() - else: - return x.maximum(0).sum() + x.maximum(0).sum() def proximal(self, x, tau, out=None): if out is None: diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py index 1d77510..c8bd546 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py @@ -266,15 +266,30 @@ class BlockOperator(Operator): # column BlockOperator return self.get_item(0,0).domain_geometry() else: - shape = (self.shape[0], 1) - return BlockGeometry(*[el.domain_geometry() for el in self.operators], - shape=shape) + # get the geometries column wise + # we need only the geometries from the first row + # since it is compatible from __init__ + tmp = [] + for i in range(self.shape[1]): + tmp.append(self.get_item(0,i).domain_geometry()) + return BlockGeometry(*tmp) + + #shape = (self.shape[0], 1) + #return BlockGeometry(*[el.domain_geometry() for el in self.operators], + # shape=self.shape) def range_geometry(self): '''returns the range of the BlockOperator''' - shape = (self.shape[1], 1) - return BlockGeometry(*[el.range_geometry() for el in self.operators], - shape=shape) + + tmp = [] + for i in range(self.shape[0]): + tmp.append(self.get_item(i,0).range_geometry()) + return BlockGeometry(*tmp) + + + #shape = (self.shape[1], 1) + #return BlockGeometry(*[el.range_geometry() for el in self.operators], + # shape=shape) def sum_abs_row(self): @@ -312,7 +327,8 @@ class BlockOperator(Operator): if __name__ == '__main__': from ccpi.framework import ImageGeometry - from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff + from ccpi.optimisation.operators import Gradient, Identity, \ + SparseFiniteDiff, SymmetrizedGradient, ZeroOperator M, N = 4, 3 @@ -363,4 +379,39 @@ if __name__ == '__main__': + ########################################################################### + # Block Operator for TGV reconstruction + + M, N = 2,3 + ig = ImageGeometry(M, N) + ag = ig + + op11 = Gradient(ig) + op12 = Identity(op11.range_geometry()) + + op22 = SymmetrizedGradient(op11.domain_geometry()) + + op21 = ZeroOperator(ig, op22.range_geometry()) + + + op31 = Identity(ig, ag) + op32 = ZeroOperator(op22.domain_geometry(), ag) + + operator = BlockOperator(op11, -1*op12, op21, op22, op31, op32, shape=(3,2) ) + + z1 = operator.domain_geometry() + z2 = operator.range_geometry() + + print(z1.shape) + print(z2.shape) + + + + + + + + + +
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py index 835f96d..f459334 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -7,7 +7,6 @@ Created on Fri Mar 1 22:51:17 2019 """ from ccpi.optimisation.operators import LinearOperator -from ccpi.optimisation.ops import PowerMethodNonsquare from ccpi.framework import ImageData, BlockDataContainer import numpy as np @@ -42,7 +41,7 @@ class FiniteDiff(LinearOperator): #self.voxel_size = kwargs.get('voxel_size',1) # this wrongly assumes a homogeneous voxel size - self.voxel_size = self.gm_domain.voxel_size_x +# self.voxel_size = self.gm_domain.voxel_size_x def direct(self, x, out=None): @@ -318,7 +317,7 @@ class FiniteDiff(LinearOperator): def norm(self): x0 = self.gm_domain.allocate('random_int') - self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) + self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0) return self.s1 diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py index 4935435..e0b8a32 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py @@ -7,7 +7,6 @@ Created on Fri Mar 1 22:50:04 2019 """ from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator -from ccpi.optimisation.ops import PowerMethodNonsquare from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer import numpy from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff @@ -90,7 +89,7 @@ class Gradient(LinearOperator): def norm(self): x0 = self.gm_domain.allocate('random') - self.s1, sall, svec = PowerMethodNonsquare(self, 10, x0) + self.s1, sall, svec = LinearOperator.PowerMethod(self, 10, x0) return self.s1 def __rmul__(self, scalar): diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py index e19304f..f304cc6 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py @@ -6,6 +6,8 @@ Created on Tue Mar 5 15:57:52 2019 """
from ccpi.optimisation.operators import Operator
+from ccpi.framework import ImageGeometry
+import numpy
class LinearOperator(Operator):
@@ -20,3 +22,46 @@ class LinearOperator(Operator):
only available to linear operators'''
raise NotImplementedError
+
+ @staticmethod
+ def PowerMethod(operator, iterations, x_init=None):
+ '''Power method to compute iteratively the operator norm (largest singular value)'''
+
+ # Initialise random
+ if x_init is None:
+ x0 = operator.domain_geometry().allocate(ImageGeometry.RANDOM_INT)
+ else:
+ x0 = x_init.copy()
+
+ x1 = operator.domain_geometry().allocate()
+ y_tmp = operator.range_geometry().allocate()
+ s = numpy.zeros(iterations)
+ # Loop
+ for it in numpy.arange(iterations):
+ operator.direct(x0,out=y_tmp)
+ operator.adjoint(y_tmp,out=x1)
+ x1norm = x1.norm()
+ s[it] = x1.dot(x0) / x0.squared_norm()
+ x1.multiply((1.0/x1norm), out=x0)
+ return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
+
+ @staticmethod
+ def PowerMethodNonsquare(op,numiters , x_init=None):
+ '''Power method to compute iteratively the operator norm (largest singular value)'''
+
+ if x_init is None:
+ x0 = op.allocate_direct()
+ x0.fill(numpy.random.randn(*x0.shape))
+ else:
+ x0 = x_init.copy()
+
+ s = numpy.zeros(numiters)
+ # Loop
+ for it in numpy.arange(numiters):
+ x1 = op.adjoint(op.direct(x0))
+ x1norm = x1.norm()
+ s[it] = (x1*x0).sum() / (x0.squared_norm())
+ x0 = (1.0/x1norm)*x1
+ return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
+
+
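A short sanity check for the PowerMethod added above — a sketch, not part of the patch, assuming the ccpi package built from this tree: for the 2D Gradient the estimate should approach sqrt(8), the value quoted in the SymmetrizedGradient comments further down.

import numpy
from ccpi.framework import ImageGeometry
from ccpi.optimisation.operators import Gradient, LinearOperator

ig = ImageGeometry(voxel_num_x=32, voxel_num_y=32)
G = Gradient(ig)
norm_est, per_iteration, _ = LinearOperator.PowerMethod(G, 25)
print(norm_est, numpy.sqrt(8))   # the estimate converges towards sqrt(8) ~ 2.828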
diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperatorMatrix.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperatorMatrix.py new file mode 100644 index 0000000..62e22e0 --- /dev/null +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperatorMatrix.py @@ -0,0 +1,51 @@ +import numpy +from scipy.sparse.linalg import svds +from ccpi.framework import DataContainer +from ccpi.framework import AcquisitionData +from ccpi.framework import ImageData +from ccpi.framework import ImageGeometry +from ccpi.framework import AcquisitionGeometry +from numbers import Number +from ccpi.optimisation.operators import LinearOperator +class LinearOperatorMatrix(LinearOperator): + def __init__(self,A): + self.A = A + self.s1 = None # Largest singular value, initially unknown + super(LinearOperatorMatrix, self).__init__() + + def direct(self,x, out=None): + if out is None: + return type(x)(numpy.dot(self.A,x.as_array())) + else: + numpy.dot(self.A, x.as_array(), out=out.as_array()) + + + def adjoint(self,x, out=None): + if out is None: + return type(x)(numpy.dot(self.A.transpose(),x.as_array())) + else: + numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array()) + + + def size(self): + return self.A.shape + + def get_max_sing_val(self): + # If unknown, compute and store. If known, simply return it. + if self.s1 is None: + self.s1 = svds(self.A,1,return_singular_vectors=False)[0] + return self.s1 + else: + return self.s1 + def allocate_direct(self): + '''allocates the memory to hold the result of adjoint''' + #numpy.dot(self.A.transpose(),x.as_array()) + M_A, N_A = self.A.shape + out = numpy.zeros((N_A,1)) + return DataContainer(out) + def allocate_adjoint(self): + '''allocate the memory to hold the result of direct''' + #numpy.dot(self.A.transpose(),x.as_array()) + M_A, N_A = self.A.shape + out = numpy.zeros((M_A,1)) + return DataContainer(out) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py index c38458d..243809b 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py @@ -7,7 +7,6 @@ Created on Fri Mar 1 22:53:55 2019 """ from ccpi.optimisation.operators import Gradient, Operator, LinearOperator, ScaledOperator -from ccpi.optimisation.ops import PowerMethodNonsquare from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer import numpy from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff @@ -15,6 +14,12 @@ from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff class SymmetrizedGradient(Gradient): + ''' Symmetrized Gradient, denoted by E: V --> W + where V is the Range of the Gradient Operator + and W is the Range of the Symmetrized Gradient. 
+ ''' + + def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs): super(SymmetrizedGradient, self).__init__(gm_domain, bnd_cond, **kwargs) @@ -22,165 +27,218 @@ class SymmetrizedGradient(Gradient): ''' Domain of SymGrad is the Range of Gradient ''' + self.gm_domain = self.gm_range self.bnd_cond = bnd_cond self.channels = self.gm_range.get_item(0).channels - if self.correlation=='Space': - if self.channels>1: - pass - else: -# # 2D image ---> Dx v1, Dyv2, Dx - tmp = self.gm_domain.geometries + (self.gm_domain.get_item(0),) - self.gm_range = BlockGeometry(*tmp ) - self.ind1 = range(self.gm_domain.get_item(0).length) - self.ind2 = range(self.gm_domain.get_item(0).length-1, -1, -1) -# self.order = myorder = [0,1,2 3] - - elif self.correlation=='SpaceChannels': - if self.channels>1: - pass - else: - raise ValueError('No channels to correlate') - - - def direct(self, x, out=None): + tmp_gm = len(self.gm_domain.geometries)*self.gm_domain.geometries -# tmp = numpy.zeros(self.gm_range) -# tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) -# tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) -# tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) + -# FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) ) -# -# return type(x)(tmp) - - tmp = [[None]*2]*2 - for i in range(2): - for j in range(2): - tmp[i][j]=FiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).adjoint(x.get_item(j)) - tmp = numpy.array(tmp) - z = 0.5 * (tmp.T + tmp) - z = z.to + self.gm_range = BlockGeometry(*tmp_gm) - return BlockDataContainer(*z.tolist()) - - - def adjoint(self, x, out=None): - pass + self.FD = FiniteDiff(self.gm_domain, direction = 0, bnd_cond = self.bnd_cond) - res = [] - for i in range(2): - tmp = ImageData(np.zeros(x.get_item(0))) - for j in range(2): - tmp += FiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).direct(x.get_item(j)) - res.append(tmp) - return res - + if self.gm_domain.shape[0]==2: + self.order_ind = [0,2,1,3] + else: + self.order_ind = [0,3,6,1,4,7,2,5,8] -# for + def direct(self, x, out=None): + + if out is None: + + tmp = [] + for i in range(self.gm_domain.shape[0]): + for j in range(x.shape[0]): + self.FD.direction = i + tmp.append(self.FD.adjoint(x.get_item(j))) + + tmp1 = [tmp[i] for i in self.order_ind] + + res = [0.5 * sum(x) for x in zip(tmp, tmp1)] + + return BlockDataContainer(*res) + + else: + + ind = 0 + for i in range(self.gm_domain.shape[0]): + for j in range(x.shape[0]): + self.FD.direction = i + self.FD.adjoint(x.get_item(j), out=out[ind]) + ind+=1 + out1 = BlockDataContainer(*[out[i] for i in self.order_ind]) + out.fill( 0.5 * (out + out1) ) + + + def adjoint(self, x, out=None): -# tmp = numpy.zeros(self.gm_domain) -# -# tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \ -# FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) -# -# tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \ -# FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1]) -# -# return type(x)(tmp) + if out is None: - def alloc_domain_dim(self): - return ImageData(numpy.zeros(self.gm_domain)) - - def alloc_range_dim(self): - return 
ImageData(numpy.zeros(self.range_dim)) - - def domain_dim(self): + tmp = [None]*self.gm_domain.shape[0] + i = 0 + + for k in range(self.gm_domain.shape[0]): + tmp1 = 0 + for j in range(self.gm_domain.shape[0]): + self.FD.direction = j + tmp1 += self.FD.direct(x[i]) + i+=1 + tmp[k] = tmp1 + return BlockDataContainer(*tmp) + + + else: + + tmp = self.gm_domain.allocate() + i = 0 + for k in range(self.gm_domain.shape[0]): + tmp1 = 0 + for j in range(self.gm_domain.shape[0]): + self.FD.direction = j + self.FD.direct(x[i], out=tmp[j]) + i+=1 + tmp1+=tmp[j] + out[k].fill(tmp1) +# tmp = self.adjoint(x) +# out.fill(tmp) + + + def domain_geometry(self): return self.gm_domain - def range_dim(self): + def range_geometry(self): return self.gm_range def norm(self): -# return np.sqrt(4*len(self.domainDim())) - #TODO this takes time for big ImageData - # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12) - x0 = ImageData(np.random.random_sample(self.domain_dim())) - self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) - return self.s1 + +# TODO need dot method for BlockDataContainer +# return numpy.sqrt(4*self.gm_domain.shape[0]) + +# x0 = self.gm_domain.allocate('random') + self.s1, sall, svec = LinearOperator.PowerMethod(self, 50) + return self.s1 if __name__ == '__main__': ########################################################################### - ## Symmetrized Gradient + ## Symmetrized Gradient Tests from ccpi.framework import DataContainer from ccpi.optimisation.operators import Gradient, BlockOperator, FiniteDiff import numpy as np N, M = 2, 3 K = 2 + C = 2 ig1 = ImageGeometry(N, M) - ig2 = ImageGeometry(N, M, channels=K) + ig2 = ImageGeometry(N, M, channels=C) E1 = SymmetrizedGradient(ig1, correlation = 'Space', bnd_cond='Neumann') - E2 = SymmetrizedGradient(ig2, correlation = 'SpaceChannels', bnd_cond='Periodic') - print(E1.domain_geometry().shape) - print(E2.domain_geometry().shape) + try: + E1 = SymmetrizedGradient(ig1, correlation = 'SpaceChannels', bnd_cond='Neumann') + except: + print("No Channels to correlate") + + E2 = SymmetrizedGradient(ig2, correlation = 'SpaceChannels', bnd_cond='Neumann') + + print(E1.domain_geometry().shape, E1.range_geometry().shape) + print(E2.domain_geometry().shape, E2.range_geometry().shape) + + #check Linear operator property + + u1 = E1.domain_geometry().allocate('random_int') + u2 = E2.domain_geometry().allocate('random_int') + + # Need to allocate random_int at the Range of SymGradient + + #a1 = ig1.allocate('random_int') + #a2 = ig1.allocate('random_int') + #a3 = ig1.allocate('random_int') + + #a4 = ig1.allocate('random_int') + #a5 = ig1.allocate('random_int') + #a6 = ig1.allocate('random_int') + + # TODO allocate has to create this symmetry by default!!!!! 
+ #w1 = BlockDataContainer(*[a1, a2, \ + # a2, a3]) + w1 = E1.range_geometry().allocate('random_int',symmetry=True) + + LHS = (E1.direct(u1) * w1).sum() + RHS = (u1 * E1.adjoint(w1)).sum() + + numpy.testing.assert_equal(LHS, RHS) - u1 = E1.gm_domain.allocate('random_int') u2 = E2.gm_domain.allocate('random_int') - - res = E1.direct(u1) + #aa1 = ig2.allocate('random_int') + #aa2 = ig2.allocate('random_int') + #aa3 = ig2.allocate('random_int') + #aa4 = ig2.allocate('random_int') + #aa5 = ig2.allocate('random_int') + #aa6 = ig2.allocate('random_int') - res1 = E1.adjoint(res) + #w2 = BlockDataContainer(*[aa1, aa2, aa3, \ + # aa2, aa4, aa5, \ + # aa3, aa5, aa6]) + w2 = E2.range_geometry().allocate('random_int',symmetry=True) + -# Dx = FiniteDiff(ig1, direction = 1, bnd_cond = 'Neumann') -# Dy = FiniteDiff(ig1, direction = 0, bnd_cond = 'Neumann') -# -# B = BlockOperator(Dy, Dx) -# V = BlockDataContainer(u1,u2) -# -# res = B.adjoint(V) + LHS1 = (E2.direct(u2) * w2).sum() + RHS1 = (u2 * E2.adjoint(w2)).sum() -# ig = (N,M) -# ig2 = (2,) + ig -# ig3 = (3,) + ig -# u1 = ig.allocate('random_int') -# w1 = E.gm_range.allocate('random_int') -# DataContainer(np.random.randint(10, size=ig3)) + numpy.testing.assert_equal(LHS1, RHS1) + out = E1.range_geometry().allocate() + E1.direct(u1, out=out) + a1 = E1.direct(u1) + numpy.testing.assert_array_equal(a1[0].as_array(), out[0].as_array()) + numpy.testing.assert_array_equal(a1[1].as_array(), out[1].as_array()) + numpy.testing.assert_array_equal(a1[2].as_array(), out[2].as_array()) + numpy.testing.assert_array_equal(a1[3].as_array(), out[3].as_array()) -# d1 = E.direct(u1) -# d2 = E.adjoint(w1) + out1 = E1.domain_geometry().allocate() + E1.adjoint(w1, out=out1) + b1 = E1.adjoint(w1) -# LHS = (d1.as_array()[0]*w1.as_array()[0] + \ -# d1.as_array()[1]*w1.as_array()[1] + \ -# 2*d1.as_array()[2]*w1.as_array()[2]).sum() -# -# RHS = (u1.as_array()[0]*d2.as_array()[0] + \ -# u1.as_array()[1]*d2.as_array()[1]).sum() -# -# -# print(LHS, RHS, E.norm()) -# + LHS_out = (out * w1).sum() + RHS_out = (u1 * out1).sum() + print(LHS_out, RHS_out) -# + out2 = E2.range_geometry().allocate() + E2.direct(u2, out=out2) + a2 = E2.direct(u2) + + out21 = E2.domain_geometry().allocate() + E2.adjoint(w2, out=out21) + b2 = E2.adjoint(w2) + LHS_out = (out2 * w2).sum() + RHS_out = (u2 * out21).sum() + print(LHS_out, RHS_out) + out = E1.range_geometry().allocate() + E1.direct(u1, out=out) + E1.adjoint(out, out=out1) + print(E1.norm()) + print(E2.norm()) -
\ No newline at end of file + +# +# +# +#
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py index a7c5f09..8168f0b 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py @@ -8,32 +8,37 @@ Created on Wed Mar 6 19:25:53 2019 import numpy as np from ccpi.framework import ImageData -from ccpi.optimisation.operators import Operator +from ccpi.optimisation.operators import LinearOperator -class ZeroOp(Operator): +class ZeroOperator(LinearOperator): - def __init__(self, gm_domain, gm_range): + def __init__(self, gm_domain, gm_range=None): + + super(ZeroOperator, self).__init__() + self.gm_domain = gm_domain - self.gm_range = gm_range - super(ZeroOp, self).__init__() + self.gm_range = gm_range + if self.gm_range is None: + self.gm_range = self.gm_domain + def direct(self,x,out=None): if out is None: - return ImageData(np.zeros(self.gm_range)) + return self.gm_range.allocate() else: - return ImageData(np.zeros(self.gm_range)) + out.fill(self.gm_range.allocate()) def adjoint(self,x, out=None): if out is None: - return ImageData(np.zeros(self.gm_domain)) + return self.gm_domain.allocate() else: - return ImageData(np.zeros(self.gm_domain)) + out.fill(self.gm_domain.allocate()) def norm(self): return 0 - def domain_dim(self): + def domain_geometry(self): return self.gm_domain - def range_dim(self): + def range_geometry(self): return self.gm_range
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py index 7040d3a..23222d4 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py @@ -18,6 +18,6 @@ from .FiniteDifferenceOperator import FiniteDiff from .GradientOperator import Gradient
from .SymmetrizedGradientOperator import SymmetrizedGradient
from .IdentityOperator import Identity
-from .ZeroOperator import ZeroOp
-
+from .ZeroOperator import ZeroOperator
+from .LinearOperatorMatrix import LinearOperatorMatrix
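To exercise the exports updated above — a minimal sketch, not part of the patch, assuming the ccpi package built from this tree: ZeroOperator now defaults its range to the domain, and LinearOperatorMatrix wraps a dense numpy matrix.

import numpy
from ccpi.framework import DataContainer, ImageGeometry
from ccpi.optimisation.operators import ZeroOperator, LinearOperatorMatrix

A = LinearOperatorMatrix(numpy.ones((3, 2)))
y = A.direct(DataContainer(numpy.ones((2, 1))))       # numpy.dot under the hood
print(A.get_max_sing_val())                           # largest singular value via svds

ig = ImageGeometry(voxel_num_x=4, voxel_num_y=4)
Z = ZeroOperator(ig)                                  # range defaults to the domain
print(Z.direct(ig.allocate('random_int')).as_array().sum())   # 0.0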
diff --git a/Wrappers/Python/wip/Demos/IMAT_Reconstruction/TV_WhiteBeam_reconstruction.py b/Wrappers/Python/wip/Demos/IMAT_Reconstruction/TV_WhiteBeam_reconstruction.py index ef7a9ec..10d15fa 100644 --- a/Wrappers/Python/wip/Demos/IMAT_Reconstruction/TV_WhiteBeam_reconstruction.py +++ b/Wrappers/Python/wip/Demos/IMAT_Reconstruction/TV_WhiteBeam_reconstruction.py @@ -52,11 +52,11 @@ pixv = sinogram_wb.shape[1] # detectors igWB = ImageGeometry(voxel_num_x = pixh, voxel_num_y = pixv) # Load Golden angles -with open("/media/newhd/vaggelis/CCPi/CCPi-Block/CCPi-Framework/Wrappers/Python/wip/Demos/IMAT_Reconstruction/golden_angles.txt") as f: +with open("golden_angles.txt") as f: angles_string = [line.rstrip() for line in f] angles = numpy.array(angles_string).astype(float) agWB = AcquisitionGeometry('parallel', '2D', angles * numpy.pi / 180, pixh) -op_WB = AstraProjectorSimple(igWB, agWB, 'gpu') +op_WB = AstraProjectorSimple(igWB, agWB, 'cpu') sinogram_aqdata = AcquisitionData(sinogram_wb, agWB) # BackProjection diff --git a/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_Poisson.py b/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_Poisson.py index 4903c44..58978ae 100644 --- a/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_Poisson.py +++ b/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_Poisson.py @@ -48,7 +48,7 @@ noisy_data = ImageData(n1) # Regularisation Parameter alpha = 2 -method = '1' +method = '0' if method == '0': diff --git a/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_SaltPepper.py b/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_SaltPepper.py index 484fe42..4189acb 100644 --- a/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_SaltPepper.py +++ b/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_SaltPepper.py @@ -48,7 +48,7 @@ noisy_data = ImageData(n1) # Regularisation Parameter alpha = 2 -method = '1' +method = '0' if method == '0': diff --git a/Wrappers/Python/wip/test_symGrad_method1.py b/Wrappers/Python/wip/test_symGrad_method1.py deleted file mode 100644 index 36adee1..0000000 --- a/Wrappers/Python/wip/test_symGrad_method1.py +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Feb 22 14:53:03 2019 - -@author: evangelos -""" - -from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer - -import numpy as np -import numpy -import matplotlib.pyplot as plt - -from ccpi.optimisation.algorithms import PDHG, PDHG_old - -from ccpi.optimisation.operators import BlockOperator, Identity, \ - Gradient, SymmetrizedGradient, ZeroOperator -from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \ - MixedL21Norm, BlockFunction - -from skimage.util import random_noise - -from timeit import default_timer as timer -#def dt(steps): -# return steps[-1] - steps[-2] - -# Create phantom for TGV Gaussian denoising - -N = 3 - -data = np.zeros((N,N)) - -x1 = np.linspace(0, int(N/2), N) -x2 = np.linspace(int(N/2), 0., N) -xv, yv = np.meshgrid(x1, x2) - -xv[int(N/4):int(3*N/4)-1, int(N/4):int(3*N/4)-1] = yv[int(N/4):int(3*N/4)-1, int(N/4):int(3*N/4)-1].T - -data = xv -data = data/data.max() - -plt.imshow(data) -plt.show() - -ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) -ag = ig - -# Create noisy data. 
Add Gaussian noise -n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.005, seed=10) -noisy_data = ImageData(n1) - - -plt.imshow(noisy_data.as_array()) -plt.title('Noisy data') -plt.show() - -alpha, beta = 0.2, 1 - -#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") -method = '1' - - -# Create operators -op11 = Gradient(ig) -op12 = Identity(op11.range_geometry()) - -op22 = SymmetrizedGradient(op11.domain_geometry()) - -op21 = ZeroOperator(ig, op22.range_geometry()) - - -op31 = Identity(ig, ag) -op32 = ZeroOperator(op22.domain_geometry(), ag) - -operator1 = BlockOperator(op11, -1*op12, op21, op22, op31, op32, shape=(3,2) ) - - -f1 = alpha * MixedL21Norm() -f2 = beta * MixedL21Norm() -f3 = ZeroFunction() -f_B3 = BlockFunction(f1, f2, f3) -g_B3 = ZeroFunction() - - - -# Create operators -op11 = Gradient(ig) -op12 = Identity(op11.range_geometry()) - -op22 = SymmetrizedGradient(op11.domain_geometry()) - -op21 = ZeroOperator(ig, op22.range_geometry()) - -operator2 = BlockOperator(op11, -1*op12, \ - op21, op22, \ - shape=(2,2) ) - -#f1 = alpha * MixedL21Norm() -#f2 = beta * MixedL21Norm() -f_B2 = BlockFunction(f1, f2) -g_B2 = 0.5 * L2NormSquared(b = noisy_data) - - -#%% - -x_old1 = operator1.domain_geometry().allocate('random_int') -y_old1 = operator1.range_geometry().allocate() - -xbar1 = x_old1.copy() -x_tmp1 = x_old1.copy() -x1 = x_old1.copy() - -y_tmp1 = y_old1.copy() -y1 = y_tmp1.copy() - -x_old2 = x_old1.copy() -y_old2 = operator2.range_geometry().allocate() - -xbar2 = x_old2.copy() -x_tmp2 = x_old2.copy() -x2 = x_old2.copy() - -y_tmp2 = y_old2.copy() -y2 = y_tmp2.copy() - -sigma = 0.4 -tau = 0.4 - -y_tmp1 = y_old1 + sigma * operator1.direct(xbar1) -y_tmp2 = y_old2 + sigma * operator2.direct(xbar2) - -numpy.testing.assert_array_equal(y_tmp1[0][0].as_array(), y_tmp2[0][0].as_array()) -numpy.testing.assert_array_equal(y_tmp1[0][1].as_array(), y_tmp2[0][1].as_array()) -numpy.testing.assert_array_equal(y_tmp1[1][0].as_array(), y_tmp2[1][0].as_array()) -numpy.testing.assert_array_equal(y_tmp1[1][1].as_array(), y_tmp2[1][1].as_array()) - - -y1 = f_B3.proximal_conjugate(y_tmp1, sigma) -y2 = f_B2.proximal_conjugate(y_tmp2, sigma) - -numpy.testing.assert_array_equal(y1[0][0].as_array(), y2[0][0].as_array()) -numpy.testing.assert_array_equal(y1[0][1].as_array(), y2[0][1].as_array()) -numpy.testing.assert_array_equal(y1[1][0].as_array(), y2[1][0].as_array()) -numpy.testing.assert_array_equal(y1[1][1].as_array(), y2[1][1].as_array()) - - -x_tmp1 = x_old1 - tau * operator1.adjoint(y1) -x_tmp2 = x_old2 - tau * operator2.adjoint(y2) - -numpy.testing.assert_array_equal(x_tmp1[0].as_array(), x_tmp2[0].as_array()) - - - - - - - - - - - -############################################################################## -#x_1 = operator1.domain_geometry().allocate('random_int') -# -#x_2 = BlockDataContainer(x_1[0], x_1[1]) -# -#res1 = operator1.direct(x_1) -#res2 = operator2.direct(x_2) -# -#print(res1[0][0].as_array(), res2[0][0].as_array()) -#print(res1[0][1].as_array(), res2[0][1].as_array()) -# -#print(res1[1][0].as_array(), res2[1][0].as_array()) -#print(res1[1][1].as_array(), res2[1][1].as_array()) -# -##res1 = op11.direct(x1[0]) - op12.direct(x1[1]) -##res2 = op21.direct(x1[0]) - op22.direct(x1[1]) |