author | epapoutsellis <epapoutsellis@gmail.com> | 2019-04-14 20:03:53 +0100 |
---|---|---|
committer | epapoutsellis <epapoutsellis@gmail.com> | 2019-04-14 20:03:53 +0100 |
commit | 0c0c274a4566dfa46bac56d61dc59d9c97dc8dbc (patch) | |
tree | b197e8b7c9a62ca993d3fa37e93dbecf58e2cf76 | |
parent | c6b643e939b0c26e41fea4a86d81178af2481387 (diff) | |
add docstrings and fix pnorm
3 files changed, 98 insertions, 66 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
index 8cce290..bf627a5 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
@@ -6,36 +6,35 @@
 Created on Fri Mar 8 10:01:31 2019

 @author: evangelos
 """
-import numpy as np
-
 from ccpi.optimisation.functions import Function
 from ccpi.framework import BlockDataContainer
 from numbers import Number

 class BlockFunction(Function):
-    '''A Block vector of Functions
+    '''BlockFunction acts as a separable sum function, i.e.,

-    .. math::
-
-        f = [f_1,f_2,f_3]
-        f([x_1,x_2,x_3]) = f_1(x_1) + f_2(x_2) + f_3(x_3)
+        f = [f_1,...,f_n]
+
+        f([x_1,...,x_n]) = f_1(x_1) + .... + f_n(x_n)

     '''
     def __init__(self, *functions):
-        '''Creator'''
+
         self.functions = functions
         self.length = len(self.functions)
         super(BlockFunction, self).__init__()

     def __call__(self, x):
-        '''evaluates the BlockFunction on the BlockDataContainer
+
+        '''Evaluates the BlockFunction at a BlockDataContainer x

         :param: x (BlockDataContainer): must have as many rows as self.length
         returns sum(f_i(x_i))
         '''
+
         if self.length != x.shape[0]:
             raise ValueError('BlockFunction and BlockDataContainer have incompatible size')
         t = 0
@@ -44,7 +43,12 @@ class BlockFunction(Function):
         return t

     def convex_conjugate(self, x):
-        '''Convex_conjugate does not take into account the BlockOperator'''
+
+        ''' Evaluate convex conjugate of BlockFunction at x
+
+        returns sum(f_i^{*}(x_i))
+
+        '''
         t = 0
         for i in range(x.shape[0]):
             t += self.functions[i].convex_conjugate(x.get_item(i))
@@ -52,7 +56,13 @@ class BlockFunction(Function):

     def proximal_conjugate(self, x, tau, out = None):
-        '''proximal_conjugate does not take into account the BlockOperator'''
+
+        ''' Evaluate Proximal Operator of tau * f(\cdot) at x
+
+        prox_{tau*f}(x) = sum_{i} prox_{tau*f_{i}}(x_{i})
+
+
+        '''
         if out is not None:
             if isinstance(tau, Number):
@@ -76,7 +86,14 @@ class BlockFunction(Function):

     def proximal(self, x, tau, out = None):
-        '''proximal does not take into account the BlockOperator'''
+
+        ''' Evaluate Proximal Operator of tau * f^{*}(\cdot) at x
+
+        prox_{tau*f^{*}}(x) = sum_{i} prox_{tau*f^{*}_{i}}(x_{i})
+
+
+        '''
+
         out = [None]*self.length
         if isinstance(tau, Number):
             for i in range(self.length):
@@ -88,8 +105,19 @@ class BlockFunction(Function):
         return BlockDataContainer(*out)

     def gradient(self,x, out=None):
-        '''FIXME: gradient returns pass'''
-        pass
+
+        ''' Evaluate gradient of f at x: f'(x)
+
+        returns: BlockDataContainer [f_{1}'(x_{1}), ..., f_{n}'(x_{n})]
+
+        '''
+
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].gradient(x.get_item(i))
+
+        return BlockDataContainer(*out)
+

 if __name__ == '__main__':
@@ -100,6 +128,7 @@ if __name__ == '__main__':
     from ccpi.framework import ImageGeometry, BlockGeometry
     from ccpi.optimisation.operators import Gradient, Identity, BlockOperator
     import numpy
+    import numpy as np

     ig = ImageGeometry(M, N)
@@ -131,11 +160,7 @@ if __name__ == '__main__':
     numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(), \
                                             res_out[1].as_array(), decimal=4)

-
-
-
-
-
+
 ##########################################################################
diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
index c0e8a6a..f524c5f 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
@@ -17,47 +17,48 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import numpy as np
 from ccpi.optimisation.functions import Function, ScaledFunction
-from ccpi.framework import DataContainer, ImageData, \
-                           ImageGeometry, BlockDataContainer
+from ccpi.framework import BlockDataContainer
+
 import functools

-############################ mixed_L1,2NORM FUNCTIONS #####################
 class MixedL21Norm(Function):
+
+    '''
+        f(x) = ||x||_{2,1} = \sum |x|_{2}
+    '''
+
     def __init__(self, **kwargs):

         super(MixedL21Norm, self).__init__()
         self.SymTensor = kwargs.get('SymTensor',False)

-    def __call__(self, x, out=None):
+    def __call__(self, x):

-        ''' Evaluates L1,2Norm at point x
+        ''' Evaluates L2,1Norm at point x

             :param: x is a BlockDataContainer
         '''
         if not isinstance(x, BlockDataContainer):
-            raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x)))
+            raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x)))
+
         if self.SymTensor:
+            #TODO fix this case
             param = [1]*x.shape[0]
             param[-1] = 2
             tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])]
-            res = sum(tmp).sqrt().sum()
-        else:
-
-#            tmp = [ x[i]**2 for i in range(x.shape[0])]
-            tmp = [ el**2 for el in x.containers ]
+            res = sum(tmp).sqrt().sum()
-#            print(x.containers)
-#            print(tmp)
-#            print(type(sum(tmp)))
-#            print(type(tmp))
-            res = sum(tmp).sqrt().sum()
-#            print(res)
-        return res
+        else:
+
+            #tmp = [ el**2 for el in x.containers ]
+            #res = sum(tmp).sqrt().sum()
+            res = x.pnorm()
+
+        return res

     def gradient(self, x, out=None):
         return ValueError('Not Differentiable')
@@ -93,20 +94,28 @@ class MixedL21Norm(Function):
         else:

             if out is None:
-                tmp = [ el*el for el in x.containers]
-                res = sum(tmp).sqrt().maximum(1.0)
-                frac = [el/res for el in x.containers]
-                res = BlockDataContainer(*frac)
-                return res
+#                tmp = [ el*el for el in x.containers]
+#                res = sum(tmp).sqrt().maximum(1.0)
+#                frac = [el/res for el in x.containers]
+#                res = BlockDataContainer(*frac)
+#                return res
+
+                return x.divide(x.pnorm().maximum(1.0))
             else:
-
-
-                res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 )
-                res = res1.sqrt().maximum(1.0)
-                x.divide(res, out=out)
+
+#                res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 )
+#                res = res1.sqrt().maximum(1.0)
+#                x.divide(res, out=out)
+                x.divide(x.pnorm().maximum(1.0), out=out)

     def __rmul__(self, scalar):
+
+        ''' Multiplication of L2NormSquared with a scalar
+
+            Returns: ScaledFunction
+
+        '''
         return ScaledFunction(self, scalar)
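The pnorm change above replaces the hand-rolled sum-of-squares/sqrt chains in MixedL21Norm with BlockDataContainer's pnorm, while BlockFunction now evaluates, conjugates and differentiates component-wise as a separable sum. Below is a minimal stand-alone NumPy sketch of the underlying maths only; it does not use the ccpi/CIL classes, the names mixed_l21_norm and project_unit_ball are illustrative, and it assumes a "block" is a list of equally shaped arrays with pnorm playing the role of the pointwise 2-norm across the blocks.

```python
import numpy as np

# Stand-alone sketch of the maths behind MixedL21Norm (not the ccpi API).
# A "block" x is modelled as a list of equally shaped arrays [x_1, ..., x_n].

def mixed_l21_norm(blocks):
    """f(x) = ||x||_{2,1}: pointwise 2-norm across the blocks, summed over pixels."""
    pointwise = np.sqrt(sum(b ** 2 for b in blocks))   # the role played by x.pnorm()
    return pointwise.sum()

def project_unit_ball(blocks):
    """Prox of the convex conjugate: pointwise projection x_i / max(||x||_2, 1)."""
    denom = np.maximum(np.sqrt(sum(b ** 2 for b in blocks)), 1.0)
    return [b / denom for b in blocks]

x = [np.random.rand(4, 5), np.random.rand(4, 5)]       # a two-component block
print(mixed_l21_norm(x))
y = project_unit_ball(x)
# after the projection the pointwise 2-norm never exceeds 1
assert np.all(np.sqrt(sum(b ** 2 for b in y)) <= 1.0 + 1e-12)
```

This mirrors what the patched proximal_conjugate computes with x.divide(x.pnorm().maximum(1.0)); BlockFunction then simply sums or stacks the per-component results.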
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
index 3fbb858..cb85249 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
@@ -20,6 +20,7 @@
from numbers import Number
import numpy
class ScaledFunction(object):
+
'''ScaledFunction
A class to represent the scalar multiplication of a Function with a scalar.
@@ -48,12 +49,22 @@ class ScaledFunction(object):
def convex_conjugate(self, x):
'''returns the convex_conjugate of the scaled function '''
- # if out is None:
- # return self.scalar * self.function.convex_conjugate(x/self.scalar)
- # else:
- # out.fill(self.function.convex_conjugate(x/self.scalar))
- # out *= self.scalar
return self.scalar * self.function.convex_conjugate(x/self.scalar)
+
+ def gradient(self, x, out=None):
+ '''Returns the gradient of the function at x, if the function is differentiable'''
+ if out is None:
+ return self.scalar * self.function.gradient(x)
+ else:
+ out.fill( self.scalar * self.function.gradient(x) )
+
+ def proximal(self, x, tau, out=None):
+ '''This returns the proximal operator for the function at x, tau
+ '''
+ if out is None:
+ return self.function.proximal(x, tau*self.scalar)
+ else:
+ out.fill( self.function.proximal(x, tau*self.scalar) )
def proximal_conjugate(self, x, tau, out = None):
'''This returns the proximal operator for the function at x, tau
@@ -76,20 +87,7 @@ class ScaledFunction(object):
versions of the CIL. Use proximal instead''', DeprecationWarning)
return self.proximal(x, out=None)
- def gradient(self, x, out=None):
- '''Returns the gradient of the function at x, if the function is differentiable'''
- if out is None:
- return self.scalar * self.function.gradient(x)
- else:
- out.fill( self.scalar * self.function.gradient(x) )
- def proximal(self, x, tau, out=None):
- '''This returns the proximal operator for the function at x, tau
- '''
- if out is None:
- return self.function.proximal(x, tau*self.scalar)
- else:
- out.fill( self.function.proximal(x, tau*self.scalar) )
if __name__ == '__main__':
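The relocated gradient and proximal methods of ScaledFunction rely on the standard scaling identities (scalar*f)'(x) = scalar*f'(x) and prox_{tau*(scalar*f)}(x) = prox_{(tau*scalar)*f}(x), which is why proximal simply delegates with tau*self.scalar. A quick plain-NumPy check of the proximal identity, using f(x) = 0.5*||x||^2 as a hypothetical stand-in (its prox has the closed form x/(1+t)); the helper names below are illustrative and not part of the framework.

```python
import numpy as np

def prox_f(x, t):
    """Closed-form prox of f(x) = 0.5*||x||^2: argmin_v 0.5*||v-x||^2 + t*f(v) = x/(1+t)."""
    return x / (1.0 + t)

def prox_scaled_numeric(x, tau, scalar, iters=200, lr=0.1):
    """Minimise 0.5*||v-x||^2 + tau*scalar*0.5*||v||^2 by gradient descent."""
    v = x.copy()
    for _ in range(iters):
        v -= lr * ((v - x) + tau * scalar * v)
    return v

x = np.random.rand(8)
tau, scalar = 0.7, 3.0
# prox of the scaled function equals the prox of f evaluated with step tau*scalar
np.testing.assert_allclose(prox_scaled_numeric(x, tau, scalar),
                           prox_f(x, tau * scalar), atol=1e-6)
```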