author     epapoutsellis <epapoutsellis@gmail.com>    2019-04-12 16:00:51 +0100
committer  epapoutsellis <epapoutsellis@gmail.com>    2019-04-12 16:00:51 +0100
commit     4c88537805e864b1d6fdf2d40a9d147bf72bcbe3 (patch)
tree       ebb38adb850742ddca1dbb6d0709e1827c745ed6
parent     474767cce1d559b7790824b33ed6244be62e9666 (diff)
fix memopt and docstrings
-rwxr-xr-x  Wrappers/Python/ccpi/framework/BlockDataContainer.py              6
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/L1Norm.py           141
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py     23
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py       5
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py     3
5 files changed, 95 insertions, 83 deletions
diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
index 529a1ce..75ee4b2 100755
--- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py
+++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
@@ -97,7 +97,7 @@ class BlockDataContainer(object):
a = el.is_compatible(other)
else:
a = el.shape == other.shape
- print ("current element" , el.shape, "other ", other.shape, "same shape" , a)
+# print ("current element" , el.shape, "other ", other.shape, "same shape" , a)
ret = ret and a
return ret
#return self.get_item(0).shape == other.shape
@@ -468,3 +468,7 @@ class BlockDataContainer(object):
'''Inline truedivision'''
return self.__idiv__(other)
+
+
+
+
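The hunk above only silences a debug print inside BlockDataContainer.is_compatible; the surrounding logic checks that every element of the block shares the shape of the other operand. A minimal NumPy sketch of that element-wise shape check (shapes_compatible is an illustrative stand-in, not the framework API):

import numpy as np

# Illustrative stand-in for the shape check in BlockDataContainer.is_compatible:
# every element of the block must match the shape of `other`.
def shapes_compatible(blocks, other):
    ret = True
    for el in blocks:
        ret = ret and (el.shape == other.shape)
    return ret

blocks = [np.zeros((2, 3)), np.ones((2, 3))]
other = np.full((2, 3), 5.0)
print(shapes_compatible(blocks, other))   # True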
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
index 163eefa..4e53f2c 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
@@ -16,11 +16,82 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""
-Created on Wed Mar 6 19:42:34 2019
-@author: evangelos
-"""
+
+from ccpi.optimisation.functions import Function
+from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
+from ccpi.optimisation.operators import ShrinkageOperator
+
+
+class L1Norm(Function):
+
+ '''
+
+ Class: L1Norm
+
+ Cases: a) f(x) = ||x||_{1}
+
+ b) f(x) = ||x - b||_{1}
+
+ '''
+
+ def __init__(self, **kwargs):
+
+ super(L1Norm, self).__init__()
+ self.b = kwargs.get('b',None)
+
+ def __call__(self, x):
+
+ ''' Evaluate L1Norm at x: f(x) '''
+
+ y = x
+ if self.b is not None:
+ y = x - self.b
+ return y.abs().sum()
+
+ def gradient(self,x):
+ #TODO implement subgradient???
+ return ValueError('Not Differentiable')
+
+ def convex_conjugate(self,x):
+ #TODO implement Indicator infty???
+
+ y = 0
+ if self.b is not None:
+ y = 0 + (self.b * x).sum()
+ return y
+
+ def proximal(self, x, tau, out=None):
+
+ # TODO implement shrinkage operator, we will need it later e.g SplitBregman
+
+ if out is None:
+ if self.b is not None:
+ return self.b + ShrinkageOperator.__call__(self, x - self.b, tau)
+ else:
+ return ShrinkageOperator.__call__(self, x, tau)
+ else:
+ if self.b is not None:
+ out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau))
+ else:
+ out.fill(ShrinkageOperator.__call__(self, x, tau))
+
+ def proximal_conjugate(self, x, tau, out=None):
+
+ if out is None:
+ if self.b is not None:
+ return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))
+ else:
+ return x.divide(x.abs().maximum(1.0))
+ else:
+ if self.b is not None:
+ out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)))
+ else:
+ out.fill(x.divide(x.abs().maximum(1.0)) )
+
+ def __rmul__(self, scalar):
+ return ScaledFunction(self, scalar)
+
#import numpy as np
##from ccpi.optimisation.funcs import Function
@@ -92,67 +163,7 @@ Created on Wed Mar 6 19:42:34 2019
#
###############################################################################
-from ccpi.optimisation.functions import Function
-from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
-from ccpi.optimisation.operators import ShrinkageOperator
-
-
-class L1Norm(Function):
-
- def __init__(self, **kwargs):
-
- super(L1Norm, self).__init__()
- self.b = kwargs.get('b',None)
-
- def __call__(self, x):
-
- y = x
- if self.b is not None:
- y = x - self.b
- return y.abs().sum()
-
- def gradient(self,x):
- #TODO implement subgradient???
- return ValueError('Not Differentiable')
-
- def convex_conjugate(self,x):
- #TODO implement Indicator infty???
-
- y = 0
- if self.b is not None:
- y = 0 + (self.b * x).sum()
- return y
-
- def proximal(self, x, tau, out=None):
-
- # TODO implement shrinkage operator, we will need it later e.g SplitBregman
-
- if out is None:
- if self.b is not None:
- return self.b + ShrinkageOperator.__call__(self, x - self.b, tau)
- else:
- return ShrinkageOperator.__call__(self, x, tau)
- else:
- if self.b is not None:
- out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau))
- else:
- out.fill(ShrinkageOperator.__call__(self, x, tau))
-
- def proximal_conjugate(self, x, tau, out=None):
-
- if out is None:
- if self.b is not None:
- return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))
- else:
- return x.divide(x.abs().maximum(1.0))
- else:
- if self.b is not None:
- out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)))
- else:
- out.fill(x.divide(x.abs().maximum(1.0)) )
-
- def __rmul__(self, scalar):
- return ScaledFunction(self, scalar)
+
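The relocated L1Norm class wraps the classic shrinkage (soft-thresholding) step, prox_{tau ||.||_1}(x) = sign(x) * max(|x| - tau, 0), shifted by b for f(x) = ||x - b||_{1}, while proximal_conjugate projects onto the unit l-infinity ball. A minimal NumPy sketch of those two closed forms, assuming plain arrays rather than the framework's DataContainer and ShrinkageOperator:

import numpy as np

def shrinkage(x, tau):
    # Soft-thresholding: prox of tau * ||x||_1
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

def l1_proximal(x, tau, b=None):
    # prox of tau * ||x - b||_1 via translation of the shrinkage step
    if b is not None:
        return b + shrinkage(x - b, tau)
    return shrinkage(x, tau)

def l1_proximal_conjugate(x, tau, b=None):
    # Projection onto the unit l-infinity ball (shifted by tau*b when b is given)
    y = x if b is None else x - tau * b
    return y / np.maximum(np.abs(y), 1.0)

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
print(l1_proximal(x, tau=1.0))            # [-2. -0.  0.  0.  2.]
print(l1_proximal_conjugate(x, tau=1.0))  # entries clipped to [-1, 1]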
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 903dafa..7397cfb 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -17,19 +17,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import numpy
from ccpi.optimisation.functions import Function
from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
class L2NormSquared(Function):
- '''
-
- Class: L2NormSquared
-
- Cases: a) f(x) = ||x||^{2}
+ '''
+
+ Cases: a) f(x) = \|x\|^{2}_{2}
- b) f(x) = ||x - b||^{2}, b
+ b) f(x) = ||x - b||^{2}_{2}
'''
@@ -50,9 +47,7 @@ class L2NormSquared(Function):
except AttributeError as ae:
# added for compatibility with SIRF
return (y.norm()**2)
-
-
-
+
def gradient(self, x, out=None):
''' Evaluate gradient of L2NormSquared at x: f'(x) '''
@@ -127,11 +122,10 @@ class L2NormSquared(Function):
def __rmul__(self, scalar):
- ''' Allows multiplication of L2NormSquared with a scalar
+ ''' Multiplication of L2NormSquared with a scalar
Returns: ScaledFunction
-
-
+
'''
return ScaledFunction(self, scalar)
@@ -139,7 +133,8 @@ class L2NormSquared(Function):
if __name__ == '__main__':
-
+ from ccpi.framework import ImageGeometry
+ import numpy
# TESTS for L2 and scalar * L2
M, N, K = 2,3,5
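For the quadratic counterpart, the tidied docstring covers f(x) = ||x||^{2}_{2} and f(x) = ||x - b||^{2}_{2}; the gradient is 2(x - b) and the proximal map has the closed form prox_{tau f}(x) = (x + 2 tau b) / (1 + 2 tau). A small NumPy sketch of those formulas (illustrative names, not the framework API):

import numpy as np

def l2sq(x, b=None):
    # f(x) = ||x - b||_2^2
    y = x if b is None else x - b
    return float(np.sum(y * y))

def l2sq_gradient(x, b=None):
    # f'(x) = 2 * (x - b)
    return 2.0 * (x if b is None else x - b)

def l2sq_proximal(x, tau, b=None):
    # Closed-form prox of tau * ||x - b||_2^2
    if b is not None:
        return (x + 2.0 * tau * b) / (1.0 + 2.0 * tau)
    return x / (1.0 + 2.0 * tau)

x = np.array([1.0, 2.0, 3.0])
b = np.array([0.0, 1.0, 1.0])
print(l2sq(x, b))                       # 6.0
print(l2sq_gradient(x, b))              # [2. 2. 4.]
print(l2sq_proximal(x, tau=0.5, b=b))   # [0.5 1.5 2. ]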
diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
index 24c47f4..c0e8a6a 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
@@ -99,11 +99,12 @@ class MixedL21Norm(Function):
res = BlockDataContainer(*frac)
return res
else:
+
+
res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 )
res = res1.sqrt().maximum(1.0)
x.divide(res, out=out)
- #for i,el in enumerate(x.containers):
- # el.divide(res, out=out.get_item(i))
+
def __rmul__(self, scalar):
return ScaledFunction(self, scalar)
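The memopt branch in MixedL21Norm.proximal_conjugate builds the point-wise Euclidean norm across the block components with functools.reduce, clamps it at 1, and divides each component by that factor in place. A minimal NumPy sketch of the same computation on a plain list of arrays (mixed_l21_proximal_conjugate is an illustrative name, not the BlockDataContainer API):

import functools
import numpy as np

def mixed_l21_proximal_conjugate(containers):
    # Point-wise Euclidean norm across the block components,
    # clamped at 1, then used to divide each component
    # (projection onto the dual-norm unit ball).
    res1 = functools.reduce(lambda a, b: a + b * b, containers,
                            np.zeros_like(containers[0]))
    res = np.maximum(np.sqrt(res1), 1.0)
    return [el / res for el in containers]

grad = [np.array([3.0, 0.3]), np.array([4.0, 0.4])]
print(mixed_l21_proximal_conjugate(grad))   # point-wise norms 5.0 and 0.5, clamped to 5.0 and 1.0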
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
index 464b944..3fbb858 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
@@ -61,7 +61,8 @@ class ScaledFunction(object):
if out is None:
return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)
else:
- out.fill(self.scalar*self.function.proximal_conjugate(x/self.scalar, tau/self.scalar))
+ self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out=out)
+ out *= self.scalar
def grad(self, x):
'''Alias of gradient(x,None)'''
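The ScaledFunction change above is the memopt part of the commit: instead of filling out with a freshly allocated result, the wrapped function writes its proximal_conjugate directly into out, which is then scaled in place. The identity being evaluated is prox_{tau (sigma f)*}(x) = sigma * prox_{(tau/sigma) f*}(x / sigma). A hedged NumPy sketch of the same pattern with a preallocated buffer (scaled_prox_conjugate and prox_conjugate_linf are illustrative names):

import numpy as np

def prox_conjugate_linf(x, tau, out=None):
    # prox of tau * f* for f = ||.||_1: projection onto the unit l-infinity ball
    # (tau does not appear because the conjugate of the l1 norm is an indicator function)
    res = x / np.maximum(np.abs(x), 1.0)
    if out is None:
        return res
    out[...] = res

def scaled_prox_conjugate(scalar, x, tau, out):
    # Memory-conscious pattern: write into `out`, then scale in place
    # instead of allocating a temporary result.
    prox_conjugate_linf(x / scalar, tau / scalar, out=out)
    out *= scalar

x = np.array([-3.0, 0.2, 4.0])
out = np.empty_like(x)
scaled_prox_conjugate(2.0, x, tau=1.0, out=out)
print(out)   # [-2.   0.2  2. ]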