author | epapoutsellis <epapoutsellis@gmail.com> | 2019-03-08 15:42:11 +0000
---|---|---
committer | epapoutsellis <epapoutsellis@gmail.com> | 2019-03-08 15:42:11 +0000
commit | a3db4f14e0981b0a3cfceee58c810ab4d484c116 (patch) |
tree | 8e17d22b64fea492da56fe9c196f389e01da59a8 |
parent | af7925fb8b5da9b0b47c1abbc29cd861968dd16c (diff) |
download | framework-a3db4f14e0981b0a3cfceee58c810ab4d484c116.tar.gz framework-a3db4f14e0981b0a3cfceee58c810ab4d484c116.tar.bz2 framework-a3db4f14e0981b0a3cfceee58c810ab4d484c116.tar.xz framework-a3db4f14e0981b0a3cfceee58c810ab4d484c116.zip |
blockFramework
26 files changed, 2123 insertions(+), 1 deletion(-)
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
new file mode 100644
index 0000000..7488310
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Feb 4 16:18:06 2019
+
+@author: evangelos
+"""
+
+from ccpi.framework import ImageData
+import numpy as np
+import matplotlib.pyplot as plt
+import time
+from Operators.CompositeOperator import CompositeOperator
+from Operators.CompositeDataContainer import CompositeDataContainer
+
+def PDHG(f, g, operator, tau=None, sigma=None, opt=None, **kwargs):
+
+    # algorithmic parameters
+    if opt is None:
+        opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100,
+               'memopt': False}
+
+    if sigma is None and tau is None:
+        raise ValueError('Need sigma*tau||K||^2 < 1')
+
+    niter = opt['niter'] if 'niter' in opt.keys() else 1000
+    tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
+    memopt = opt['memopt'] if 'memopt' in opt.keys() else False
+    show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False
+    stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False
+
+    if isinstance(operator, CompositeOperator):
+#    if isinstance(operator, CompositeOperator_DataContainer):
+        x_old = operator.alloc_domain_dim()
+        y_old = operator.alloc_range_dim()
+    else:
+        x_old = ImageData(np.zeros(operator.domain_dim()))
+        y_old = ImageData(np.zeros(operator.range_dim()))
+
+    xbar = x_old
+    x_tmp = x_old
+    x = x_old
+
+    y_tmp = y_old
+    y = y_tmp
+
+    # relaxation parameter
+    theta = 1
+
+    t = time.time()
+
+    objective = []
+
+    for i in range(niter):
+
+        # gradient ascent in the dual variable
+        y_tmp = y_old + sigma * operator.direct(xbar)
+        y = f.proximal_conjugate(y_tmp, sigma)
+
+        # gradient descent in the primal variable
+        x_tmp = x_old - tau * operator.adjoint(y)
+        x = g.proximal(x_tmp, tau)
+
+        # over-relaxation update
+        xbar = x + theta * (x - x_old)
+
+        x_old = x
+        y_old = y
+
+        # primal-dual gap
+        print(f(x) + g(x) + f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)))
+
+# # TV denoising, pdgap with composite
+#
+# primal_obj = f.get_item(0).alpha * ImageData(operator.compMat[0][0].direct(x.get_item(0)).power(2).sum(axis=0)).sqrt().sum() +\
+#              0.5*( (operator.compMat[1][0].direct(x.get_item(0)) - f.get_item(1).b).power(2).sum())
+# dual_obj = 0.5 * ((y.get_item(1).power(2)).sum()) + ( y.get_item(1)*f.get_item(1).b ).sum()
+
+    # TV denoising, pdgap with no composite
+
+# primal_obj = f.get_item(0).alpha * ImageData(operator.compMat[0][0].direct(x.get_item(0)).power(2).sum(axis=0)).sqrt().sum() +\
+#              0.5*( (operator.compMat[1][0].direct(x.get_item(0)) - f.get_item(1).b).power(2).sum())
+# dual_obj = 0.5 * ((y.get_item(1).power(2)).sum()) + ( y.get_item(1)*f.get_item(1).b ).sum()
+
+# print(primal_obj)
+# objective = primal_obj
+#
+
+    t_end = time.time()
+
+    return x, t_end - t, objective
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
new file mode 100644
index 0000000..bc9b62d
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 8 10:01:31 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import BlockDataContainer
+
+class BlockFunction(Function):
+
+    def __init__(self, operator, *functions):
+
+        self.functions = functions
+        self.operator = operator
+        self.length = len(self.functions)
+
+        super(BlockFunction, self).__init__()
+
+    def __call__(self, x):
+
+        tmp = self.operator.direct(x)
+
+        t = 0
+        for i in range(tmp.shape[0]):
+            t += self.functions[i](tmp.get_item(i))
+        return t
+
+    def call_adjoint(self, x):
+
+        tmp = self.operator.adjoint(x)
+
+        t = 0
+        for i in range(tmp.shape[0]):
+            t += self.functions[i](tmp.get_item(i))
+        return t
+
+    def convex_conjugate(self, x):
+
+        ''' convex_conjugate does not take into account the BlockOperator'''
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i].convex_conjugate(x.get_item(i))
+        return t
+
+    def proximal_conjugate(self, x, tau, out=None):
+
+        ''' proximal_conjugate does not take into account the BlockOperator'''
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
+
+        return BlockDataContainer(*out)
+
+    def proximal(self, x, tau, out=None):
+
+        ''' proximal does not take into account the BlockOperator'''
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal(x.get_item(i), tau)
+
+        return BlockDataContainer(*out)
+
+    def gradient(self, x, out=None):
+        pass
\ No newline at end of file
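A minimal usage sketch for the new PDHG driver and BlockFunction (not part of the commit): a TV-denoising setup built from the operators and functions added below. CompositeOperator is assumed from the existing framework (PDHG itself imports it), and the data, geometry and step sizes are illustrative.

    import numpy as np
    from ccpi.framework import ImageData
    from ccpi.optimisation.algorithms.PDHG import PDHG          # module added above
    from ccpi.optimisation.functions import BlockFunction, ZeroFun, L2NormSq, mixed_L12Norm
    from ccpi.optimisation.operators import Gradient, Identity
    # assumed available from the framework:
    # from Operators.CompositeOperator import CompositeOperator

    N = 100
    ig = (N, N)
    noisy_data = ImageData(np.random.random_sample(ig))   # placeholder data

    op1 = Gradient(ig)                              # regularisation block
    op2 = Identity(ig)                              # data-fidelity block
    K = CompositeOperator((2, 1), op1, op2)         # 2x1 block operator

    f = BlockFunction(K, mixed_L12Norm(alpha=1.0), L2NormSq(0.5, b=noisy_data))
    g = ZeroFun()

    normK = K.norm()                                # power-method estimate of ||K||
    sigma = 1.0 / normK
    tau = 1.0 / normK                               # sigma*tau*||K||^2 ~ 1; shrink slightly in practice

    x, t, objective = PDHG(f, g, K, tau=tau, sigma=sigma, opt={'niter': 200})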
diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py
new file mode 100644
index 0000000..5b6defc
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Mar 6 19:45:06 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import DataContainer, ImageData, ImageGeometry
+from ccpi.framework import BlockDataContainer
+
+class FunctionOperatorComposition(Function):
+
+    def __init__(self, function, operator):
+
+        self.function = function
+        self.operator = operator
+        self.grad_Lipschitz_cnst = 2*self.function.alpha*operator.norm()**2
+        super(FunctionOperatorComposition, self).__init__()
+
+    def __call__(self, x):
+
+        return self.function(self.operator.direct(x))
+
+    def call_adjoint(self, x):
+
+        return self.function(self.operator.adjoint(x))
+
+    def convex_conjugate(self, x):
+
+        return self.function.convex_conjugate(x)
+
+    def proximal(self, x, tau):
+
+        ''' proximal does not take into account the Operator'''
+
+        return self.function.proximal(x, tau, out=None)
+
+    def proximal_conjugate(self, x, tau):
+
+        ''' proximal conjugate does not take into account the Operator'''
+
+        return self.function.proximal_conjugate(x, tau, out=None)
+
+    def gradient(self, x):
+
+        ''' Gradient takes into account the Operator'''
+
+        return self.operator.adjoint(self.function.gradient(self.operator.direct(x)))
+
+
+class BlockFunction(Function):
+
+    def __init__(self, operator, *functions):
+
+        self.functions = functions
+        self.operator = operator
+        self.length = len(self.functions)
+
+        super(BlockFunction, self).__init__()
+
+    def __call__(self, x):
+
+        tmp = self.operator.direct(x)
+
+        t = 0
+        for i in range(tmp.shape[0]):
+            t += self.functions[i](tmp.get_item(i))
+        return t
+
+    def call_adjoint(self, x):
+
+        tmp = self.operator.adjoint(x)
+
+        t = 0
+        for i in range(tmp.shape[0]):
+            t += self.functions[i](tmp.get_item(i))
+        return t
+
+    def convex_conjugate(self, x):
+
+        ''' convex_conjugate does not take into account the BlockOperator'''
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i].convex_conjugate(x.get_item(i))
+        return t
+
+    def proximal_conjugate(self, x, tau, out=None):
+
+        ''' proximal_conjugate does not take into account the BlockOperator'''
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
+
+        return BlockDataContainer(*out)
+
+    def proximal(self, x, tau, out=None):
+
+        ''' proximal does not take into account the BlockOperator'''
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal(x.get_item(i), tau)
+
+        return BlockDataContainer(*out)
+
+    def gradient(self, x, out=None):
+        pass
+
+
+class FunctionComposition_new(Function):
+
+    def __init__(self, operator, *functions):
+
+        self.functions = functions
+        self.operator = operator
+        self.length = len(self.functions)
+
+        super(FunctionComposition_new, self).__init__()
+
+    def __call__(self, x):
+
+        if isinstance(x, ImageData):
+            x = BlockDataContainer(x)
+
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i](x.get_item(i))
+        return t
+
+    def convex_conjugate(self, x):
+
+        if isinstance(x, ImageData):
+            x = BlockDataContainer(x)
+
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i].convex_conjugate(x.get_item(i))
+        return t
+
+    def proximal_conjugate(self, x, tau, out=None):
+
+        if isinstance(x, ImageData):
+            x = BlockDataContainer(x)
+
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
+
+        if self.length == 1:
+            return ImageData(*out)
+        else:
+            return BlockDataContainer(*out)
+
+    def proximal(self, x, tau, out=None):
+
+        if isinstance(x, ImageData):
+            x = BlockDataContainer(x)
+
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal(x.get_item(i), tau)
+
+        if self.length == 1:
+            return ImageData(*out)
+        else:
+            return BlockDataContainer(*out)
+
+
+if __name__ == '__main__':
+
+    from operators import Operator
+    from IdentityOperator import Identity
\ No newline at end of file
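The separable pattern in FunctionComposition_new is easy to sanity-check: the composite call is just the sum of the component functions on the matching blocks. A small sketch (values illustrative; BlockDataContainer behaviour as used elsewhere in this commit; the operator argument is unused by __call__):

    import numpy as np
    from ccpi.framework import ImageData, BlockDataContainer
    from ccpi.optimisation.functions import L2NormSq

    f = FunctionComposition_new(None, L2NormSq(1.0), L2NormSq(0.5))
    x = BlockDataContainer(ImageData(np.ones((2, 2))), ImageData(np.ones((2, 2))))

    print(f(x))   # 1.0*4 + 0.5*4 = 6.0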
diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
new file mode 100644
index 0000000..a67b884
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 8 09:55:36 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+
+
+class FunctionOperatorComposition(Function):
+
+    def __init__(self, operator, function):
+
+        self.function = function
+        self.operator = operator
+        self.L = 2*self.function.alpha*operator.norm()**2
+        super(FunctionOperatorComposition, self).__init__()
+
+    def __call__(self, x):
+
+        return self.function(self.operator.direct(x))
+
+    def call_adjoint(self, x):
+
+        return self.function(self.operator.adjoint(x))
+
+    def convex_conjugate(self, x):
+
+        ''' convex_conjugate does not take into account the Operator'''
+        return self.function.convex_conjugate(x)
+
+    def proximal(self, x, tau):
+
+        ''' proximal does not take into account the Operator'''
+
+        return self.function.proximal(x, tau, out=None)
+
+    def proximal_conjugate(self, x, tau, out=None):
+
+        ''' proximal conjugate does not take into account the Operator'''
+
+        return self.function.proximal_conjugate(x, tau)
+
+    def gradient(self, x):
+
+        ''' Gradient takes into account the Operator'''
+
+        return self.operator.adjoint(self.function.gradient(self.operator.direct(x)))
+
\ No newline at end of file
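A short sketch of the chain rule implemented in gradient() above (geometry and values illustrative; L2NormSq and Gradient are the classes added in this commit):

    import numpy as np
    from ccpi.framework import ImageData
    from ccpi.optimisation.functions import FunctionOperatorComposition, L2NormSq
    from ccpi.optimisation.operators import Gradient

    ig = (10, 10)
    A = Gradient(ig)
    fc = FunctionOperatorComposition(A, L2NormSq(0.5))

    x = ImageData(np.random.random_sample(ig))
    print(fc(x))           # 0.5 * ||A x||_2^2
    grad = fc.gradient(x)  # chain rule: A^T (2 * alpha * (A x))
    print(fc.L)            # gradient Lipschitz constant, 2*alpha*||A||^2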
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
new file mode 100644
index 0000000..ec7aa5b
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Mar 6 19:42:34 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import DataContainer, ImageData, ImageGeometry
+
+
+############################ L1NORM FUNCTIONS #############################
+class SimpleL1NormEdo(Function):
+
+    def __init__(self, alpha=1):
+
+        super(SimpleL1NormEdo, self).__init__()
+        self.alpha = alpha
+
+    def __call__(self, x):
+        return self.alpha * x.abs().sum()
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self, x):
+        return 0
+
+    def proximal(self, x, tau):
+        ''' Soft Threshold'''
+        return x.sign() * (x.abs() - tau * self.alpha).maximum(0)
+
+    def proximal_conjugate(self, x, tau):
+        return x.divide((x.abs()/self.alpha).maximum(1.0))
+
+class L1Norm(SimpleL1NormEdo):
+
+    def __init__(self, alpha=1, **kwargs):
+
+        super(L1Norm, self).__init__()
+        self.alpha = alpha
+        self.b = kwargs.get('b', None)
+
+    def __call__(self, x):
+
+        if self.b is None:
+            return SimpleL1NormEdo.__call__(self, x)
+        else:
+            return SimpleL1NormEdo.__call__(self, x - self.b)
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self, x):
+        if self.b is None:
+            return SimpleL1NormEdo.convex_conjugate(self, x)
+        else:
+            return SimpleL1NormEdo.convex_conjugate(self, x) + (self.b * x).sum()
+
+    def proximal(self, x, tau):
+
+        if self.b is None:
+            return SimpleL1NormEdo.proximal(self, x, tau)
+        else:
+            return self.b + SimpleL1NormEdo.proximal(self, x - self.b, tau)
+
+    def proximal_conjugate(self, x, tau):
+
+        if self.b is None:
+            return SimpleL1NormEdo.proximal_conjugate(self, x, tau)
+        else:
+            return SimpleL1NormEdo.proximal_conjugate(self, x - tau*self.b, tau)
+
\ No newline at end of file
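The proximal map above is plain soft-thresholding, prox_{tau*alpha*||.||_1}(x) = sign(x) * max(|x| - tau*alpha, 0). A worked check with illustrative numbers (assumes ImageData wraps a small numpy array, and sign/abs/maximum behave as used in the file):

    import numpy as np
    from ccpi.framework import ImageData
    from ccpi.optimisation.functions import L1Norm

    f = L1Norm(alpha=1.0)
    x = ImageData(np.array([[-2.0, -0.3], [0.5, 3.0]]))

    print(f.proximal(x, 1.0).as_array())
    # expected: [[-1.  0.]
    #            [ 0.  2.]]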
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
new file mode 100644
index 0000000..8b73be6
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Feb 7 13:10:56 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import DataContainer, ImageData, ImageGeometry
+
+
+class SimpleL2NormSq(Function):
+
+    def __init__(self, alpha=1):
+
+        super(SimpleL2NormSq, self).__init__()
+        self.alpha = alpha
+
+        # Lipschitz constant of gradient
+        self.L = 2*self.alpha
+
+    def __call__(self, x):
+        return self.alpha * x.power(2).sum()
+
+    def gradient(self, x):
+        return 2 * self.alpha * x
+
+    def convex_conjugate(self, x):
+        return (1/(4*self.alpha)) * x.power(2).sum()
+
+    def proximal(self, x, tau):
+        return x.divide(1+2*tau*self.alpha)
+
+    def proximal_conjugate(self, x, tau):
+        return x.divide(1 + tau/(2*self.alpha))
+
+
+############################ L2NORM FUNCTIONS #############################
+class L2NormSq(SimpleL2NormSq):
+
+    def __init__(self, alpha, **kwargs):
+
+        super(L2NormSq, self).__init__(alpha)
+        self.alpha = alpha
+        self.b = kwargs.get('b', None)
+
+    def __call__(self, x):
+
+        if self.b is None:
+            return SimpleL2NormSq.__call__(self, x)
+        else:
+            return SimpleL2NormSq.__call__(self, x - self.b)
+
+    def gradient(self, x):
+
+        if self.b is None:
+            return 2*self.alpha * x
+        else:
+            return 2*self.alpha * (x - self.b)
+
+    def convex_conjugate(self, x):
+
+        ''' The convex conjugate corresponds to the simple functional i.e.,
+        f(x) = alpha * ||x - b||_{2}^{2}
+        '''
+
+        if self.b is None:
+            return SimpleL2NormSq.convex_conjugate(self, x)
+        else:
+            return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum()
+
+    def proximal(self, x, tau):
+
+        ''' The proximal operator corresponds to the simple functional i.e.,
+        f(x) = alpha * ||x - b||_{2}^{2}
+
+        argmin_x { 0.5||x - u||^{2} + tau f(x) }
+        '''
+
+        if self.b is None:
+            return SimpleL2NormSq.proximal(self, x, tau)
+        else:
+            return self.b + SimpleL2NormSq.proximal(self, x - self.b, tau)
+
+    def proximal_conjugate(self, x, tau):
+
+        ''' The proximal operator corresponds to the simple convex conjugate
+        functional i.e., f^{*}(x^{*})
+        argmin_x { 0.5||x - u||^{2} + tau f^{*}(x) }
+        '''
+        if self.b is None:
+            return SimpleL2NormSq.proximal_conjugate(self, x, tau)
+        else:
+            return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau)
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py
new file mode 100644
index 0000000..b41cc26
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Mar 6 19:44:10 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import DataContainer, ImageData
+from ccpi.framework import BlockDataContainer
+
+class ZeroFun(Function):
+
+    def __init__(self):
+        super(ZeroFun, self).__init__()
+
+    def __call__(self, x):
+        return 0
+
+    def convex_conjugate(self, x):
+        ''' This is the support function sup <x, x^{*}> which in fact is the
+        indicator function for the set = {0}
+        So 0 if x=0, or inf if x neq 0
+        '''
+
+        if x.shape[0] == 1:
+            return x.maximum(0).sum()
+        else:
+            if isinstance(x, BlockDataContainer):
+                return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum()
+            else:
+                return x.maximum(0).sum() + x.maximum(0).sum()
+
+    def proximal(self, x, tau):
+        return x.copy()
+
+    def proximal_conjugate(self, x, tau):
+        return 0
\ No newline at end of file
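For the L2NormSq class above, the translation rule prox_{tau f}(x) = b + prox_{tau f0}(x - b) gives the closed form (x + 2*tau*alpha*b) / (1 + 2*tau*alpha). A quick numeric check (illustrative values):

    import numpy as np
    from ccpi.framework import ImageData
    from ccpi.optimisation.functions import L2NormSq

    b = ImageData(np.ones((2, 2)))
    f = L2NormSq(0.5, b=b)
    x = ImageData(3 * np.ones((2, 2)))

    # (3 + 2*1*0.5*1) / (1 + 2*1*0.5) = 2.0 everywhere
    print(f.proximal(x, 1.0).as_array())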
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
new file mode 100644
index 0000000..7ce617a
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+
+
+from .ZeroFun import ZeroFun
+from .L1Norm import *
+from .L2NormSquared import *
+from .mixed_L12Norm import *
+from .FunctionOperatorComposition import FunctionOperatorComposition
+from .BlockFunction import BlockFunction
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc
new file mode 100644
index 0000000..660532e
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc
Binary files differ
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc
new file mode 100644
index 0000000..075cdfb
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc
Binary files differ
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc
new file mode 100644
index 0000000..f564eff
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc
Binary files differ
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc
new file mode 100644
index 0000000..4ef959d
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc
Binary files differ
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc
new file mode 100644
index 0000000..ad1b296
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc
Binary files differ
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc
new file mode 100644
index 0000000..f2bf70f
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc
Binary files differ
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..1321257
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc
Binary files differ
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc
new file mode 100644
index 0000000..d43e3ad
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc
Binary files differ
diff --git a/Wrappers/Python/ccpi/optimisation/functions/functions.py b/Wrappers/Python/ccpi/optimisation/functions/functions.py
new file mode 100644
index 0000000..f40abb9
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/functions.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Feb 7 13:10:56 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import DataContainer, ImageData, ImageGeometry
+from operators import CompositeDataContainer, Identity, CompositeOperator
+from numbers import Number
+
+
+############################ L2NORM FUNCTIONS #############################
+class SimpleL2NormSq(Function):
+
+    def __init__(self, alpha=1):
+
+        super(SimpleL2NormSq, self).__init__()
+        self.alpha = alpha
+
+    def __call__(self, x):
+        return self.alpha * x.power(2).sum()
+
+    def gradient(self, x):
+        return 2 * self.alpha * x
+
+    def convex_conjugate(self, x):
+        return (1/(4*self.alpha)) * x.power(2).sum()
+
+    def proximal(self, x, tau):
+        return x.divide(1+2*tau*self.alpha)
+
+    def proximal_conjugate(self, x, tau):
+        return x.divide(1 + tau/(2*self.alpha))
+
+
+class L2NormSq(SimpleL2NormSq):
+
+    def __init__(self, A, b=None, alpha=1, **kwargs):
+
+        super(L2NormSq, self).__init__(alpha=alpha)
+        self.alpha = alpha
+        self.A = A
+        self.b = b
+
+    def __call__(self, x):
+
+        if self.b is None:
+            return SimpleL2NormSq.__call__(self, self.A.direct(x))
+        else:
+            return SimpleL2NormSq.__call__(self, self.A.direct(x) - self.b)
+
+    def convex_conjugate(self, x):
+
+        ''' The convex conjugate corresponds to the simple functional i.e.,
+        f(x) = alpha * ||x - b||_{2}^{2}
+        '''
+
+        if self.b is None:
+            return SimpleL2NormSq.convex_conjugate(self, x)
+        else:
+            return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum()
+
+    def gradient(self, x):
+
+        if self.b is None:
+            return 2*self.alpha * self.A.adjoint(self.A.direct(x))
+        else:
+            return 2*self.alpha * self.A.adjoint(self.A.direct(x) - self.b)
+
+    def proximal(self, x, tau):
+
+        ''' The proximal operator corresponds to the simple functional i.e.,
+        f(x) = alpha * ||x - b||_{2}^{2}
+
+        argmin_x { 0.5||x - u||^{2} + tau f(x) }
+        '''
+
+        if self.b is None:
+            return SimpleL2NormSq.proximal(self, x, tau)
+        else:
+            return self.b + SimpleL2NormSq.proximal(self, x - self.b, tau)
+
+    def proximal_conjugate(self, x, tau):
+
+        ''' The proximal operator corresponds to the simple convex conjugate
+        functional i.e., f^{*}(x^{*})
+        argmin_x { 0.5||x - u||^{2} + tau f^{*}(x) }
+        '''
+        if self.b is None:
+            return SimpleL2NormSq.proximal_conjugate(self, x, tau)
+        else:
+            return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau)
+
+
+############################ L1NORM FUNCTIONS #############################
+class SimpleL1Norm(Function):
+
+    def __init__(self, alpha=1):
+
+        super(SimpleL1Norm, self).__init__()
+        self.alpha = alpha
+
+    def __call__(self, x):
+        return self.alpha * x.abs().sum()
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self, x):
+        return 0
+
+    def proximal(self, x, tau):
+        ''' Soft Threshold'''
+        return x.sign() * (x.abs() - tau * self.alpha).maximum(0)
+
+    def proximal_conjugate(self, x, tau):
+        return x.divide((x.abs()/self.alpha).maximum(1.0))
+
+class L1Norm(SimpleL1Norm):
+
+    def __init__(self, A, b=None, alpha=1, **kwargs):
+
+        super(L1Norm, self).__init__()
+        self.alpha = alpha
+        self.A = A
+        self.b = b
+
+    def __call__(self, x):
+
+        if self.b is None:
+            return SimpleL1Norm.__call__(self, self.A.direct(x))
+        else:
+            return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b)
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self, x):
+        if self.b is None:
+            return SimpleL1Norm.convex_conjugate(self, x)
+        else:
+            return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum()
+
+    def proximal(self, x, tau):
+
+        if self.b is None:
+            return SimpleL1Norm.proximal(self, x, tau)
+        else:
+            return self.b + SimpleL1Norm.proximal(self, x - self.b, tau)
+
+    def proximal_conjugate(self, x, tau):
+
+        if self.b is None:
+            return SimpleL1Norm.proximal_conjugate(self, x, tau)
+        else:
+            return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau)
+
+
+############################ mixed_L1,2NORM FUNCTIONS #############################
+class mixed_L12Norm(Function):
+
+    def __init__(self, A, b=None, alpha=1, **kwargs):
+
+        super(mixed_L12Norm, self).__init__()
+        self.alpha = alpha
+        self.A = A
+        self.b = b
+
+        self.sym_grad = kwargs.get('sym_grad', False)
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def __call__(self, x):
+
+        y = self.A.direct(x)
+        eucl_norm = ImageData(y.power(2).sum(axis=0)).sqrt()
+        eucl_norm.__isub__(self.b)
+        return eucl_norm.sum() * self.alpha
+
+    def convex_conjugate(self, x):
+        return 0
+
+    def proximal_conjugate(self, x, tau):
+
+        if self.b is None:
+
+            if self.sym_grad:
+                tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha
+                res = x.divide(ImageData(tmp2).maximum(1.0))
+            else:
+                res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0))
+
+        else:
+            res = (x - tau*self.b)/((x - tau*self.b)).abs().maximum(1.0)
+
+        return res
+
+
+#%%
+
+class ZeroFun(Function):
+
+    def __init__(self):
+        super(ZeroFun, self).__init__()
+
+    def __call__(self, x):
+        return 0
+
+    def convex_conjugate(self, x):
+        ''' This is the support function sup <x, x^{*}> which in fact is the
+        indicator function for the set = {0}
+        So 0 if x=0, or inf if x neq 0
+        '''
+        return x.maximum(0).sum()
+
+    def proximal(self, x, tau):
+        return x.copy()
+
+    def proximal_conjugate(self, x, tau):
+        return 0
+
+
+class CompositeFunction(Function):
+
+    def __init__(self, *args):
+        self.functions = args
+        self.length = len(self.functions)
+
+    def get_item(self, ind):
+        return self.functions[ind]
+
+    def __call__(self, x):
+
+        t = 0
+        for i in range(self.length):
+            for j in range(x.shape[0]):
+                t += self.functions[i](x.get_item(j))
+        return t
+
+    def convex_conjugate(self, x):
+
+        z = 0
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[z].convex_conjugate(x.get_item(i))
+            z += 1
+
+        return t
+
+    def proximal(self, x, tau, out=None):
+
+        if isinstance(tau, Number):
+            tau = CompositeDataContainer(tau)
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0))
+        return CompositeDataContainer(*out)
+
+    def proximal_conjugate(self, x, tau, out=None):
+
+        if isinstance(tau, Number):
+            tau = CompositeDataContainer(tau)
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0))
+        return CompositeDataContainer(*out)
+
+
+if __name__ == '__main__':
+
+    from GradientOperator import Gradient
+
+    N = 3
+    ig = (N, N)
+    ag = ig
+    op1 = Gradient(ig)
+    op2 = Identity(ig, ag)
+
+    # Form Composite Operator
+    operator = CompositeOperator((2, 1), op1, op2)
+
+    # Create functions
+    alpha = 1
+    noisy_data = ImageData(np.random.randint(10, size=ag))
+    f = CompositeFunction(L1Norm(op1, alpha=alpha),
+                          L2NormSq(op2, noisy_data, c=0.5, memopt=False))
+
+    u = ImageData(np.random.randint(10, size=ig))
+    uComp = CompositeDataContainer(u)
+
+    print(f(uComp))        # This is f(Kx) = f1(K1*u) + f2(K2*u)
+
+    f1 = L1Norm(op1, alpha=alpha)
+    f2 = L2NormSq(op2, noisy_data, c=0.5, memopt=False)
+
+    print(f1(u) + f2(u))
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py
new file mode 100644
index 0000000..0ce1d28
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Mar 6 19:43:12 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import DataContainer, ImageData, ImageGeometry
+
+
+############################ mixed_L1,2NORM FUNCTIONS #############################
+class mixed_L12Norm(Function):
+
+    def __init__(self, alpha, **kwargs):
+
+        super(mixed_L12Norm, self).__init__()
+
+        self.alpha = alpha
+        self.b = kwargs.get('b', None)
+        self.sym_grad = kwargs.get('sym_grad', False)
+
+    def __call__(self, x):
+
+        if self.b is None:
+            tmp1 = x
+        else:
+            tmp1 = x - self.b
+
+        if self.sym_grad:
+            tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2)
+        else:
+            tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt()
+
+        return self.alpha*tmp.sum()
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self, x):
+        return 0
+
+    def proximal(self, x, tau):
+        pass
+
+    def proximal_conjugate(self, x, tau):
+
+        if self.sym_grad:
+            tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha
+            res = x.divide(ImageData(tmp2).maximum(1.0))
+        else:
+            res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0))
+
+        return res
+
+    def composition_with(self, operator):
+
+        if self.b is None:
+            return FunctionComposition(mixed_L12Norm(self.alpha), operator)
+        else:
+            return FunctionComposition(mixed_L12Norm(self.alpha, b=self.b), operator)
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/test_functions.py b/Wrappers/Python/ccpi/optimisation/functions/test_functions.py
new file mode 100644
index 0000000..54a952a
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/test_functions.py
@@ -0,0 +1,474 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Mar 2 19:24:37 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import DataContainer, ImageData, ImageGeometry
+from operators import CompositeDataContainer, Identity, CompositeOperator
+from numbers import Number
+from GradientOperator import Gradient
+
+
+class SimpleL2NormSq(Function):
+
+    def __init__(self, alpha=1):
+
+        super(SimpleL2NormSq, self).__init__()
+        self.alpha = alpha
+
+    def __call__(self, x):
+        return self.alpha * x.power(2).sum()
+
+    def gradient(self, x):
+        return 2 * self.alpha * x
+
+    def convex_conjugate(self, x):
+        return (1/(4*self.alpha)) * x.power(2).sum()
+
+    def proximal(self, x, tau):
+        return x.divide(1+2*tau*self.alpha)
+
+    def proximal_conjugate(self, x, tau):
+        return x.divide(1 + tau/(2*self.alpha))
+
+
+############################ L2NORM FUNCTIONS #############################
+class L2NormSq(SimpleL2NormSq):
+
+    def __init__(self, alpha, **kwargs):
+
+        super(L2NormSq, self).__init__(alpha)
+        self.alpha = alpha
+        self.b = kwargs.get('b', None)
+        self.L = 1
+
+    def __call__(self, x):
+
+        if self.b is None:
+            return SimpleL2NormSq.__call__(self, x)
+        else:
+            return SimpleL2NormSq.__call__(self, x - self.b)
+
+    def gradient(self, x):
+
+        if self.b is None:
+            return 2*self.alpha * x
+        else:
+            return 2*self.alpha * (x - self.b)
+
+    def composition_with(self, operator):
+
+        if self.b is None:
+            return FunctionComposition(L2NormSq(self.alpha), operator)
+        else:
+            return FunctionComposition(L2NormSq(self.alpha, b=self.b), operator)
+
+    def convex_conjugate(self, x):
+
+        ''' The convex conjugate corresponds to the simple functional i.e.,
+        f(x) = alpha * ||x - b||_{2}^{2}
+        '''
+
+        if self.b is None:
+            return SimpleL2NormSq.convex_conjugate(self, x)
+        else:
+            return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum()
+
+    def proximal(self, x, tau):
+
+        ''' The proximal operator corresponds to the simple functional i.e.,
+        f(x) = alpha * ||x - b||_{2}^{2}
+
+        argmin_x { 0.5||x - u||^{2} + tau f(x) }
+        '''
+
+        if self.b is None:
+            return SimpleL2NormSq.proximal(self, x, tau)
+        else:
+            return self.b + SimpleL2NormSq.proximal(self, x - self.b, tau)
+
+    def proximal_conjugate(self, x, tau):
+
+        ''' The proximal operator corresponds to the simple convex conjugate
+        functional i.e., f^{*}(x^{*})
+        argmin_x { 0.5||x - u||^{2} + tau f^{*}(x) }
+        '''
+        if self.b is None:
+            return SimpleL2NormSq.proximal_conjugate(self, x, tau)
+        else:
+            return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau)
+
+
+############################ L1NORM FUNCTIONS #############################
+class SimpleL1Norm(Function):
+
+    def __init__(self, alpha=1):
+
+        super(SimpleL1Norm, self).__init__()
+        self.alpha = alpha
+
+    def __call__(self, x):
+        return self.alpha * x.abs().sum()
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self, x):
+        return 0
+
+    def proximal(self, x, tau):
+        ''' Soft Threshold'''
+        return x.sign() * (x.abs() - tau * self.alpha).maximum(0)
+
+    def proximal_conjugate(self, x, tau):
+        return x.divide((x.abs()/self.alpha).maximum(1.0))
+
+class L1Norm(SimpleL1Norm):
+
+    def __init__(self, alpha=1, **kwargs):
+
+        super(L1Norm, self).__init__()
+        self.alpha = alpha
+
+        self.A = kwargs.get('A', None)
+        self.b = kwargs.get('b', None)
+
+    def __call__(self, x):
+
+        if self.b is None:
+            return SimpleL1Norm.__call__(self, self.A.direct(x))
+        else:
+            return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b)
+
+    def eval_norm(self, x):
+
+        return SimpleL1Norm.__call__(self, x)
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self, x):
+        if self.b is None:
+            return SimpleL1Norm.convex_conjugate(self, x)
+        else:
+            return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum()
+
+    def proximal(self, x, tau):
+
+        if self.b is None:
+            return SimpleL1Norm.proximal(self, x, tau)
+        else:
+            return self.b + SimpleL1Norm.proximal(self, x - self.b, tau)
+
+    def proximal_conjugate(self, x, tau):
+
+        if self.b is None:
+            return SimpleL1Norm.proximal_conjugate(self, x, tau)
+        else:
+            return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau)
+
+
+############################ mixed_L1,2NORM FUNCTIONS #############################
+class mixed_L12Norm(Function):
+
+    def __init__(self, alpha, **kwargs):
+
+        super(mixed_L12Norm, self).__init__()
+
+        self.alpha = alpha
+        self.b = kwargs.get('b', None)
+        self.sym_grad = kwargs.get('sym_grad', False)
+
+    def __call__(self, x):
+
+        if self.b is None:
+            tmp1 = x
+        else:
+            tmp1 = x - self.b
+
+        if self.sym_grad:
+            tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2)
+        else:
+            tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt()
+
+        return self.alpha*tmp.sum()
+
+    def gradient(self, x):
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self, x):
+        return 0
+
+    def proximal(self, x, tau):
+        pass
+
+    def proximal_conjugate(self, x, tau):
+
+        if self.sym_grad:
+            tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha
+            res = x.divide(ImageData(tmp2).maximum(1.0))
+        else:
+            res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0))
+
+        return res
+
+    def composition_with(self, operator):
+
+        if self.b is None:
+            return FunctionComposition(mixed_L12Norm(self.alpha), operator)
+        else:
+            return FunctionComposition(mixed_L12Norm(self.alpha, b=self.b), operator)
+
+
+#%%
+
+class ZeroFun(Function):
+
+    def __init__(self):
+        super(ZeroFun, self).__init__()
+
+    def __call__(self, x):
+        return 0
+
+    def convex_conjugate(self, x):
+        ''' This is the support function sup <x, x^{*}> which in fact is the
+        indicator function for the set = {0}
+        So 0 if x=0, or inf if x neq 0
+        '''
+
+        if x.shape[0] == 1:
+            return x.maximum(0).sum()
+        else:
+            return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum()
+
+    def proximal(self, x, tau):
+        return x.copy()
+
+    def proximal_conjugate(self, x, tau):
+        return 0
+
+
+class CompositeFunction(Function):
+
+    def __init__(self, *functions, blockMatrix):
+
+        self.blockMatrix = blockMatrix
+        self.functions = functions
+        self.length = len(self.functions)
+
+    def get_item(self, ind):
+        return self.functions[ind]
+
+    def __call__(self, x):
+
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i](x.get_item(i))
+        return t
+
+    def convex_conjugate(self, x):
+
+        z = 0
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[z].convex_conjugate(x.get_item(i))
+            z += 1
+
+        return t
+
+    def proximal(self, x, tau, out=None):
+
+        if isinstance(tau, Number):
+            tau = CompositeDataContainer(tau)
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0))
+        return CompositeDataContainer(*out)
+
+    def proximal_conjugate(self, x, tau, out=None):
+
+        if isinstance(tau, Number):
+            tau = CompositeDataContainer(tau)
+        if isinstance(x, ImageData):
+            x = CompositeDataContainer(x)
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0))
+        return CompositeDataContainer(*out)
+
+#
+class FunctionComposition(Function):
+
+    def __init__(self, function, operator):
+
+        self.function = function
+        self.alpha = self.function.alpha
+        self.b = self.function.b
+        self.operator = operator
+
+        super(FunctionComposition, self).__init__()
+
+    ''' overide call and gradient '''
+    def __call__(self, x):
+        return self.function(x)
+
+    def gradient(self, x):
+        return self.operator.adjoint(self.function.gradient(self.operator.direct(x)))
+
+    ''' Same as in the parent class'''
+    def proximal(self, x, tau):
+        return self.function.proximal(x, tau)
+
+    def proximal_conjugate(self, x, tau):
+        return self.function.proximal_conjugate(x, tau)
+
+    def convex_conjugate(self, x):
+        return self.function.convex_conjugate(x)
+
+
+class FunctionComposition_new(Function):
+
+    def __init__(self, operator, *functions):
+
+        self.functions = functions
+        self.operator = operator
+        self.length = len(self.functions)
+
+#        if self.length==1:
+#            self.L = self.functions[0].alpha*(self.operator.norm()**2)
+
+        # length == to operator.shape[0]#
+        super(FunctionComposition_new, self).__init__()
+
+    def __call__(self, x):
+
+        if isinstance(x, ImageData):
+            x = CompositeDataContainer(x)
+
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i](x.get_item(i))
+        return t
+
+    def convex_conjugate(self, x):
+
+        if isinstance(x, ImageData):
+            x = CompositeDataContainer(x)
+
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i].convex_conjugate(x.get_item(i))
+        return t
+
+    def proximal_conjugate(self, x, tau, out=None):
+
+        if isinstance(x, ImageData):
+            x = CompositeDataContainer(x)
+
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
+
+        if self.length == 1:
+            return ImageData(*out)
+        else:
+            return CompositeDataContainer(*out)
+
+    def proximal(self, x, tau, out=None):
+
+        if isinstance(x, ImageData):
+            x = CompositeDataContainer(x)
+
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal(x.get_item(i), tau)
+
+        if self.length == 1:
+            return ImageData(*out)
+        else:
+            return CompositeDataContainer(*out)
+
+
+if __name__ == '__main__':
+
+    N = 3
+    ig = (N, N)
+    ag = ig
+    op1 = Gradient(ig)
+    op2 = Identity(ig, ag)
+
+    # Form Composite Operator
+    operator = CompositeOperator((2, 1), op1, op2)
+
+    # Create functions
+    noisy_data = ImageData(np.random.randint(10, size=ag))
+
+    d = ImageData(np.random.randint(10, size=ag))
+
+    f = mixed_L12Norm(alpha=1).composition_with(op1)
+    g = L2NormSq(alpha=0.5, b=noisy_data)
+
+    # Compare call of f
+    a1 = ImageData(op1.direct(d).power(2).sum(axis=0)).sqrt().sum()
+    print(a1, f(d))
+
+    # Compare call of g
+    a2 = g.alpha*(d - noisy_data).power(2).sum()
+    print(a2, g(d))
+
+    # Compare convex conjugate of g
+    a3 = 0.5 * d.power(2).sum() + (d*noisy_data).sum()
+    print(a3, g.convex_conjugate(d))
+
+
+#    f1 = L2NormSq(alpha=1, b=noisy_data)
+#    print(f1(noisy_data))
+#
+#    f2 = L2NormSq(alpha=5, b=noisy_data).composition_with(op2)
+#    print(f2(noisy_data))
+#
+#    print(f1.gradient(noisy_data).as_array())
+#    print(f2.gradient(noisy_data).as_array())
+##
+#    print(f1.proximal(noisy_data,1).as_array())
+#    print(f2.proximal(noisy_data,1).as_array())
+#
+#    f3 = mixed_L12Norm(alpha = 1).composition_with(op1)
+#    print(f3(noisy_data))
+#
+#    print(ImageData(op1.direct(noisy_data).power(2).sum(axis=0)).sqrt().sum())
+#
+#    print( 5*(op2.direct(d) - noisy_data).power(2).sum(), f2(d))
+#
+#    from functions import mixed_L12Norm as mixed_L12Norm_old
+#
+#    print(mixed_L12Norm_old(op1,None,alpha)(noisy_data))
+
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
new file mode 100644
index 0000000..16cd215
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -0,0 +1,314 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 1 22:51:17 2019
+
+@author: evangelos
+"""
+
+from ccpi.optimisation.operators import Operator
+from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.framework import ImageData, BlockDataContainer
+import numpy as np
+
+class FiniteDiff(Operator):
+
+    # Works for Neumann/Symmetric & periodic boundary conditions
+    # TODO add central differences???
+    # TODO not very well optimised, too many conditions
+    # TODO add discretisation step, should get that from imageGeometry
+
+    # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x']
+    # Grad_order = ['channels', 'direction_y', 'direction_x']
+    # Grad_order = ['direction_z', 'direction_y', 'direction_x']
+    # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x']
+
+    def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond='Neumann'):
+
+        super(FiniteDiff, self).__init__()
+
+        self.gm_domain = gm_domain
+        self.gm_range = gm_range
+        self.direction = direction
+        self.bnd_cond = bnd_cond
+
+        if self.gm_range is None:
+            self.gm_range = self.gm_domain
+
+        if self.direction + 1 > len(gm_domain):
+            raise ValueError('Gradient directions more than geometry domain')
+
+    def direct(self, x, out=None):
+
+#        x_asarr = x.as_array()
+        x_asarr = x
+        x_sz = len(x.shape)
+
+        if out is None:
+            out = np.zeros(x.shape)
+
+        fd_arr = out
+
+        ######################## Direct for 2D ###############################
+        if x_sz == 2:
+
+            if self.direction == 1:
+
+                np.subtract(x_asarr[:,1:], x_asarr[:,0:-1], out=fd_arr[:,0:-1])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,0], x_asarr[:,-1], out=fd_arr[:,-1])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 0:
+
+                np.subtract(x_asarr[1:], x_asarr[0:-1], out=fd_arr[0:-1,:])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[0,:], x_asarr[-1,:], out=fd_arr[-1,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+        ######################## Direct for 3D ###############################
+        elif x_sz == 3:
+
+            if self.direction == 0:
+
+                np.subtract(x_asarr[1:,:,:], x_asarr[0:-1,:,:], out=fd_arr[0:-1,:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[0,:,:], x_asarr[-1,:,:], out=fd_arr[-1,:,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 1:
+
+                np.subtract(x_asarr[:,1:,:], x_asarr[:,0:-1,:], out=fd_arr[:,0:-1,:])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,0,:], x_asarr[:,-1,:], out=fd_arr[:,-1,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 2:
+
+                np.subtract(x_asarr[:,:,1:], x_asarr[:,:,0:-1], out=fd_arr[:,:,0:-1])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,:,0], x_asarr[:,:,-1], out=fd_arr[:,:,-1])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+        ######################## Direct for 4D ###############################
+        elif x_sz == 4:
+
+            if self.direction == 0:
+                np.subtract(x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out=fd_arr[0:-1,:,:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out=fd_arr[-1,:,:,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 1:
+                np.subtract(x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out=fd_arr[:,0:-1,:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out=fd_arr[:,-1,:,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 2:
+                np.subtract(x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out=fd_arr[:,:,0:-1,:])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out=fd_arr[:,:,-1,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 3:
+                np.subtract(x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out=fd_arr[:,:,:,0:-1])
+
+                if self.bnd_cond == 'Neumann':
+                    pass
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out=fd_arr[:,:,:,-1])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+        else:
+            raise NotImplementedError
+
+        res = out
+        return res
+
+    def adjoint(self, x, out=None):
+
+#        x_asarr = x.as_array()
+        x_asarr = x
+        x_sz = len(x.shape)
+
+        if out is None:
+            out = np.zeros(x.shape)
+
+        fd_arr = out
+
+        ######################## Adjoint for 2D ###############################
+        if x_sz == 2:
+
+            if self.direction == 1:
+
+                np.subtract(x_asarr[:,1:], x_asarr[:,0:-1], out=fd_arr[:,1:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[:,0], 0, out=fd_arr[:,0])
+                    np.subtract(-x_asarr[:,-2], 0, out=fd_arr[:,-1])
+
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,0], x_asarr[:,-1], out=fd_arr[:,0])
+
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 0:
+
+                np.subtract(x_asarr[1:,:], x_asarr[0:-1,:], out=fd_arr[1:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[0,:], 0, out=fd_arr[0,:])
+                    np.subtract(-x_asarr[-2,:], 0, out=fd_arr[-1,:])
+
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[0,:], x_asarr[-1,:], out=fd_arr[0,:])
+
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+        ######################## Adjoint for 3D ###############################
+        elif x_sz == 3:
+
+            if self.direction == 0:
+
+                np.subtract(x_asarr[1:,:,:], x_asarr[0:-1,:,:], out=fd_arr[1:,:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[0,:,:], 0, out=fd_arr[0,:,:])
+                    np.subtract(-x_asarr[-2,:,:], 0, out=fd_arr[-1,:,:])
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[0,:,:], x_asarr[-1,:,:], out=fd_arr[0,:,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 1:
+                np.subtract(x_asarr[:,1:,:], x_asarr[:,0:-1,:], out=fd_arr[:,1:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[:,0,:], 0, out=fd_arr[:,0,:])
+                    np.subtract(-x_asarr[:,-2,:], 0, out=fd_arr[:,-1,:])
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,0,:], x_asarr[:,-1,:], out=fd_arr[:,0,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 2:
+                np.subtract(x_asarr[:,:,1:], x_asarr[:,:,0:-1], out=fd_arr[:,:,1:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[:,:,0], 0, out=fd_arr[:,:,0])
+                    np.subtract(-x_asarr[:,:,-2], 0, out=fd_arr[:,:,-1])
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,:,0], x_asarr[:,:,-1], out=fd_arr[:,:,0])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+        ######################## Adjoint for 4D ###############################
+        elif x_sz == 4:
+
+            if self.direction == 0:
+                np.subtract(x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out=fd_arr[1:,:,:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[0,:,:,:], 0, out=fd_arr[0,:,:,:])
+                    np.subtract(-x_asarr[-2,:,:,:], 0, out=fd_arr[-1,:,:,:])
+
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out=fd_arr[0,:,:,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 1:
+                np.subtract(x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out=fd_arr[:,1:,:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[:,0,:,:], 0, out=fd_arr[:,0,:,:])
+                    np.subtract(-x_asarr[:,-2,:,:], 0, out=fd_arr[:,-1,:,:])
+
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out=fd_arr[:,0,:,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 2:
+                np.subtract(x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out=fd_arr[:,:,1:,:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[:,:,0,:], 0, out=fd_arr[:,:,0,:])
+                    np.subtract(-x_asarr[:,:,-2,:], 0, out=fd_arr[:,:,-1,:])
+
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out=fd_arr[:,:,0,:])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+            if self.direction == 3:
+                np.subtract(x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out=fd_arr[:,:,:,1:])
+
+                if self.bnd_cond == 'Neumann':
+                    np.subtract(x_asarr[:,:,:,0], 0, out=fd_arr[:,:,:,0])
+                    np.subtract(-x_asarr[:,:,:,-2], 0, out=fd_arr[:,:,:,-1])
+
+                elif self.bnd_cond == 'Periodic':
+                    np.subtract(x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out=fd_arr[:,:,:,0])
+                else:
+                    raise ValueError('No valid boundary conditions')
+
+        else:
+            raise NotImplementedError
+
+        res = out
+        return res
+
+    def range_dim(self):
+        return self.gm_range
+
+    def domain_dim(self):
+        return self.gm_domain
+
+    def norm(self):
+        x0 = ImageData(np.random.random_sample(self.domain_dim()))
+        self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
+        return self.s1
+
\ No newline at end of file
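A quick adjoint check for FiniteDiff (a sketch; note that direct/adjoint above operate on raw numpy arrays, and that adjoint here carries the opposite sign, which the Gradient operator below compensates for with its final minus):

    import numpy as np
    from ccpi.optimisation.operators import FiniteDiff

    D = FiniteDiff((4, 5), direction=1, bnd_cond='Neumann')
    u = np.random.random_sample((4, 5))
    v = np.random.random_sample((4, 5))

    lhs = (D.direct(u) * v).sum()
    rhs = -(u * D.adjoint(v)).sum()   # sign flip: <D u, v> = -<u, D.adjoint(v)>
    print(lhs, rhs)                   # should agree up to floating-point error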
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
new file mode 100644
index 0000000..3dcc1bd
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 1 22:50:04 2019
+
+@author: evangelos
+"""
+
+from ccpi.optimisation.operators import Operator
+from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.framework import ImageData, DataContainer, BlockDataContainer
+import numpy as np
+from ccpi.optimisation.operators import FiniteDiff
+
+#%%
+
+class Gradient(Operator):
+
+    def __init__(self, gm_domain, gm_range=None, bnd_cond='Neumann', **kwargs):
+
+        super(Gradient, self).__init__()
+
+        self.gm_domain = gm_domain   # Domain of Grad Operator
+        self.gm_range = gm_range     # Range of Grad Operator
+        self.bnd_cond = bnd_cond     # Boundary conditions of Finite Differences
+
+        if self.gm_range is None:
+            self.gm_range = ((len(self.gm_domain),) + self.gm_domain)
+
+        # Kwargs Default options
+        self.memopt = kwargs.get('memopt', False)
+        self.correlation = kwargs.get('correlation', 'Space')
+
+        # TODO not tested yet, operator norm???
+        self.voxel_size = kwargs.get('voxel_size', [1]*len(gm_domain))
+
+    def direct(self, x, out=None):
+
+        tmp = np.zeros(self.gm_range)
+        for i in range(len(self.gm_domain)):
+            tmp[i] = FiniteDiff(self.gm_domain, direction=i, bnd_cond=self.bnd_cond).direct(x.as_array())/self.voxel_size[i]
+        return type(x)(tmp)
+
+    def adjoint(self, x, out=None):
+
+        tmp = np.zeros(self.gm_domain)
+        for i in range(len(self.gm_domain)):
+            tmp += FiniteDiff(self.gm_domain, direction=i, bnd_cond=self.bnd_cond).adjoint(x.as_array()[i])/self.voxel_size[i]
+        return type(x)(-tmp)
+
+    def alloc_domain_dim(self):
+        return ImageData(np.zeros(self.gm_domain))
+
+    def alloc_range_dim(self):
+        return ImageData(np.zeros(self.range_dim))
+
+    def domain_dim(self):
+        return self.gm_domain
+
+    def range_dim(self):
+        return self.gm_range
+
+    def norm(self):
+#        return np.sqrt(4*len(self.domainDim()))
+        # TODO this takes time for big ImageData
+        # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12)
+        x0 = ImageData(np.random.random_sample(self.domain_dim()))
+        self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
+        return self.s1
+
+
+if __name__ == '__main__':
+
+    N, M = (200, 300)
+    ig = (N, M)
+    G = Gradient(ig)
+    u = DataContainer(np.random.randint(10, size=G.domain_dim()))
+    w = DataContainer(np.random.randint(10, size=G.range_dim()))
+#    w = [DataContainer(np.random.randint(10, size=G.domain_dim())),
+#         DataContainer(np.random.randint(10, size=G.domain_dim()))]
+
+    # domain_dim
+    print('Domain {}'.format(G.domain_dim()))
+
+    # range_dim
+    print('Range {}'.format(G.range_dim()))
+
+    # Direct
+    z = G.direct(u)
+
+    # Adjoint
+    z1 = G.adjoint(w)
+
+    print(z)
+    print(z1)
+
+    LHS = (G.direct(u)*w).sum()
+    RHS = (u * G.adjoint(w)).sum()
+
+    print(LHS, RHS)
+    print(G.norm())
+
+#    print(G.adjoint(G.direct(u)))
+
\ No newline at end of file
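As the TODO in norm() above notes, the power-method estimate can be compared against the analytic bound ||grad|| <= sqrt(8) in 2D (sqrt(12) in 3D) for unit voxel size. A sketch:

    import numpy as np
    from ccpi.optimisation.operators import Gradient

    G = Gradient((64, 64))
    print(G.norm(), np.sqrt(8))   # the estimate should sit just below sqrt(8)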
diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
new file mode 100644
index 0000000..d49cb30
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Mar 6 19:30:51 2019
+
+@author: evangelos
+"""
+
+from ccpi.optimisation.operators import Operator
+
+
+class Identity(Operator):
+
+    def __init__(self, gm_domain, gm_range=None):
+
+        self.gm_domain = gm_domain
+        self.gm_range = gm_range
+        if self.gm_range is None:
+            self.gm_range = self.gm_domain
+
+        super(Identity, self).__init__()
+
+    def direct(self, x, out=None):
+        if out is None:
+            return x.copy()
+        else:
+            out.fill(x)
+
+    def adjoint(self, x, out=None):
+        if out is None:
+            return x.copy()
+        else:
+            out.fill(x)
+
+    def norm(self):
+        return 1.0
+
+    def domain_dim(self):
+        return self.gm_domain
+
+    def range_dim(self):
+        return self.gm_range
\ No newline at end of file
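Identity is the building block for plain data-fidelity terms in the block setup; a tiny sketch (values illustrative):

    import numpy as np
    from ccpi.framework import ImageData
    from ccpi.optimisation.operators import Identity

    Id = Identity((3, 3))
    x = ImageData(np.arange(9.0).reshape(3, 3))
    print((Id.direct(x) - x).as_array().sum())   # 0.0
    print(Id.norm())                             # 1.0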
diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
new file mode 100644
index 0000000..d908e49
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 1 22:53:55 2019
+
+@author: evangelos
+"""
+
+from ccpi.optimisation.operators import Operator
+from ccpi.optimisation.operators import FiniteDiff
+from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.framework import ImageData, DataContainer
+import numpy as np
+
+
+class SymmetrizedGradient(Operator):
+
+    def __init__(self, gm_domain, gm_range, bnd_cond='Neumann', **kwargs):
+
+        super(SymmetrizedGradient, self).__init__()
+
+        self.gm_domain = gm_domain   # Domain of Grad Operator
+        self.gm_range = gm_range     # Range of Grad Operator
+        self.bnd_cond = bnd_cond     # Boundary conditions of Finite Differences
+
+        # Kwargs Default options
+        self.memopt = kwargs.get('memopt', False)
+        self.correlation = kwargs.get('correlation', 'Space')
+
+        # TODO not tested yet, operator norm???
+        self.voxel_size = kwargs.get('voxel_size', [1]*len(gm_domain))
+
+    def direct(self, x, out=None):
+
+        tmp = np.zeros(self.gm_range)
+        tmp[0] = FiniteDiff(self.gm_domain[1:], direction=1, bnd_cond=self.bnd_cond).adjoint(x.as_array()[0])
+        tmp[1] = FiniteDiff(self.gm_domain[1:], direction=0, bnd_cond=self.bnd_cond).adjoint(x.as_array()[1])
+        tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction=0, bnd_cond=self.bnd_cond).adjoint(x.as_array()[0]) +
+                        FiniteDiff(self.gm_domain[1:], direction=1, bnd_cond=self.bnd_cond).adjoint(x.as_array()[1]))
+
+        return type(x)(tmp)
+
+    def adjoint(self, x, out=None):
+
+        tmp = np.zeros(self.gm_domain)
+
+        tmp[0] = FiniteDiff(self.gm_domain[1:], direction=1, bnd_cond=self.bnd_cond).direct(x.as_array()[0]) + \
+                 FiniteDiff(self.gm_domain[1:], direction=0, bnd_cond=self.bnd_cond).direct(x.as_array()[2])
+
+        tmp[1] = FiniteDiff(self.gm_domain[1:], direction=1, bnd_cond=self.bnd_cond).direct(x.as_array()[2]) + \
+                 FiniteDiff(self.gm_domain[1:], direction=0, bnd_cond=self.bnd_cond).direct(x.as_array()[1])
+
+        return type(x)(tmp)
+
+    def alloc_domain_dim(self):
+        return ImageData(np.zeros(self.gm_domain))
+
+    def alloc_range_dim(self):
+        return ImageData(np.zeros(self.range_dim))
+
+    def domain_dim(self):
+        return self.gm_domain
+
+    def range_dim(self):
+        return self.gm_range
+
+    def norm(self):
+#        return np.sqrt(4*len(self.domainDim()))
+        # TODO this takes time for big ImageData
+        # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12)
+        x0 = ImageData(np.random.random_sample(self.domain_dim()))
+        self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
+        return self.s1
+
+
+if __name__ == '__main__':
+
+    ###########################################################################
+    ## Symmetrized Gradient
+
+    N, M = 2, 3
+    ig = (N, M)
+    ig2 = (2,) + ig
+    ig3 = (3,) + ig
+    u1 = DataContainer(np.random.randint(10, size=ig2))
+    w1 = DataContainer(np.random.randint(10, size=ig3))
+
+    E = SymmetrizedGradient(ig2, ig3)
+
+    d1 = E.direct(u1)
+    d2 = E.adjoint(w1)
+
+    LHS = (d1.as_array()[0]*w1.as_array()[0] +
+           d1.as_array()[1]*w1.as_array()[1] +
+           2*d1.as_array()[2]*w1.as_array()[2]).sum()
+
+    RHS = (u1.as_array()[0]*d2.as_array()[0] +
+           u1.as_array()[1]*d2.as_array()[1]).sum()
+
+    print(LHS, RHS, E.norm())
+
\ No newline at end of file
diff --git a/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
new file mode 100644
index 0000000..a7c5f09
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Mar 6 19:25:53 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.framework import ImageData
+from ccpi.optimisation.operators import Operator
+
+class ZeroOp(Operator):
+
+    def __init__(self, gm_domain, gm_range):
+        self.gm_domain = gm_domain
+        self.gm_range = gm_range
+        super(ZeroOp, self).__init__()
+
+    def direct(self, x, out=None):
+        if out is None:
+            return ImageData(np.zeros(self.gm_range))
+        else:
+            return ImageData(np.zeros(self.gm_range))
+
+    def adjoint(self, x, out=None):
+        if out is None:
+            return ImageData(np.zeros(self.gm_domain))
+        else:
+            return ImageData(np.zeros(self.gm_domain))
+
+    def norm(self):
+        return 0
+
+    def domain_dim(self):
+        return self.gm_domain
+
+    def range_dim(self):
+        return self.gm_range
\ No newline at end of file
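ZeroOp is intended for the zero blocks of a block operator (for example the off-diagonal entries in a TGV-style setup); a sketch with illustrative shapes:

    import numpy as np
    from ccpi.framework import ImageData
    from ccpi.optimisation.operators import ZeroOp

    Z = ZeroOp((2, 2), (3, 3))
    x = ImageData(np.ones((2, 2)))
    print(Z.direct(x).as_array())   # 3x3 zeros, regardless of x
    print(Z.norm())                 # 0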
diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
index cc307e0..1e86efc 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py
@@ -10,3 +10,11 @@ from .LinearOperator import LinearOperator
 from .ScaledOperator import ScaledOperator
from .BlockOperator import BlockOperator
from .BlockScaledOperator import BlockScaledOperator
+
+
+from .FiniteDifferenceOperator import FiniteDiff
+from .GradientOperator import Gradient
+from .SymmetrizedGradientOperator import SymmetrizedGradient
+from .IdentityOperator import Identity
+from .ZeroOperator import ZeroOp
+
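With these exports in place the new operators are importable straight from the subpackage (a smoke-test sketch):

    from ccpi.optimisation.operators import FiniteDiff, Gradient, SymmetrizedGradient, Identity, ZeroOp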
diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py
index 630e33e..87930b5 100644
--- a/Wrappers/Python/setup.py
+++ b/Wrappers/Python/setup.py
@@ -34,7 +34,8 @@ setup(
     packages=['ccpi' , 'ccpi.io',
               'ccpi.framework', 'ccpi.optimisation',
               'ccpi.optimisation.operators',
-              'ccpi.optimisation.algorithms'],
+              'ccpi.optimisation.algorithms',
+              'ccpi.optimisation.functions'],
 
 # Project uses reStructuredText, so ensure that the docutils get
 # installed or upgraded on the target machine