author     Edoardo Pasca <edo.paskino@gmail.com>  2019-07-02 10:50:59 +0100
committer  GitHub <noreply@github.com>            2019-07-02 10:50:59 +0100
commit     d63f2e879162b32936556fa59bb9f89cb5d5f645 (patch)
tree       992acfbc335fcc292c5d69f62fc7491c64a0a011
parent     c890ffb05a36fde36093a19b35921c5a2d84b2a3 (diff)
parent     d27e46190afe9d7901afcd1442a798b7382a01f0 (diff)
Merge pull request #339 from vais-ral/remove_wip
removed wip directory
-rw-r--r--  Wrappers/Python/wip/CGLS_tikhonov.py  196
-rw-r--r--  Wrappers/Python/wip/CreatePhantom.py  242
-rw-r--r--  Wrappers/Python/wip/Demos/FISTA_vs_CGLS.py  119
-rw-r--r--  Wrappers/Python/wip/Demos/FISTA_vs_PDHG.py  120
-rw-r--r--  Wrappers/Python/wip/Demos/IMAT_Reconstruction/TV_WhiteBeam_reconstruction.py  164
-rw-r--r--  Wrappers/Python/wip/Demos/IMAT_Reconstruction/golden_angles.txt  186
-rw-r--r--  Wrappers/Python/wip/Demos/LeastSq_CGLS_FISTA_PDHG.py  154
-rw-r--r--  Wrappers/Python/wip/Demos/PDHG_TGV_Denoising_SaltPepper.py  194
-rw-r--r--  Wrappers/Python/wip/Demos/PDHG_TV_Denoising_Gaussian_DiagPrecond.py  208
-rw-r--r--  Wrappers/Python/wip/Demos/PDHG_TV_Tomo2D.py  245
-rw-r--r--  Wrappers/Python/wip/Demos/PDHG_TV_Tomo2D_time.py  169
-rw-r--r--  Wrappers/Python/wip/Demos/PDHG_Tikhonov_Tomo2D.py  108
-rw-r--r--  Wrappers/Python/wip/Demos/PDHG_vs_CGLS.py  127
-rw-r--r--  Wrappers/Python/wip/Demos/check_blockOperator_sum_row_cols.py  89
-rw-r--r--  Wrappers/Python/wip/Demos/check_precond.py  182
-rw-r--r--  Wrappers/Python/wip/compare_CGLS_algos.py  133
-rw-r--r--  Wrappers/Python/wip/demo_SIRT.py  205
-rw-r--r--  Wrappers/Python/wip/demo_box_constraints_FISTA.py  158
-rw-r--r--  Wrappers/Python/wip/demo_colourbay.py  137
-rw-r--r--  Wrappers/Python/wip/demo_compare_cvx.py  306
-rwxr-xr-x  Wrappers/Python/wip/demo_gradient_descent.py  295
-rw-r--r--  Wrappers/Python/wip/demo_imat_multichan_RGLTK.py  151
-rw-r--r--  Wrappers/Python/wip/demo_imat_whitebeam.py  138
-rwxr-xr-x  Wrappers/Python/wip/demo_memhandle.py  193
-rwxr-xr-x  Wrappers/Python/wip/fix_test.py  208
-rwxr-xr-x  Wrappers/Python/wip/multifile_nexus.py  307
-rw-r--r--  Wrappers/Python/wip/old_demos/demo_colourbay.py  137
-rw-r--r--  Wrappers/Python/wip/old_demos/demo_compare_cvx.py  306
-rwxr-xr-x  Wrappers/Python/wip/old_demos/demo_gradient_descent.py  295
-rw-r--r--  Wrappers/Python/wip/old_demos/demo_imat_multichan_RGLTK.py  151
-rw-r--r--  Wrappers/Python/wip/old_demos/demo_imat_whitebeam.py  138
-rwxr-xr-x  Wrappers/Python/wip/old_demos/demo_memhandle.py  193
-rw-r--r--  Wrappers/Python/wip/old_demos/demo_test_sirt.py  176
-rwxr-xr-x  Wrappers/Python/wip/old_demos/multifile_nexus.py  307
-rw-r--r--  Wrappers/Python/wip/pdhg_TV_denoising_precond.py  156
35 files changed, 0 insertions, 6593 deletions
diff --git a/Wrappers/Python/wip/CGLS_tikhonov.py b/Wrappers/Python/wip/CGLS_tikhonov.py
deleted file mode 100644
index e9bbcd9..0000000
--- a/Wrappers/Python/wip/CGLS_tikhonov.py
+++ /dev/null
@@ -1,196 +0,0 @@
-from ccpi.optimisation.algorithms import CGLS
-
-from ccpi.plugins.ops import CCPiProjectorSimple
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.optimisation.ops import TomoIdentity
-from ccpi.optimisation.funcs import Norm2sq, Norm1
-from ccpi.framework import ImageGeometry, AcquisitionGeometry, ImageData, AcquisitionData
-from ccpi.optimisation.algorithms import GradientDescent
-#from ccpi.optimisation.algorithms import CGLS
-import matplotlib.pyplot as plt
-import numpy
-from ccpi.framework import BlockDataContainer
-from ccpi.optimisation.operators import BlockOperator
-
-# Set up a phantom of size N x N x vert by creating an ImageGeometry, initialising
-# an ImageData object with this geometry and an empty array, and finally putting
-# some data into its array, one slice of which can be displayed as an image.
-
-# Image parameters
-N = 128
-vert = 4
-
-# Set up image geometry
-ig = ImageGeometry(voxel_num_x=N,
- voxel_num_y=N,
- voxel_num_z=vert)
-
-# Set up empty image data
-Phantom = ImageData(geometry=ig,
- dimension_labels=['horizontal_x',
- 'horizontal_y',
- 'vertical'])
-Phantom += 0.05
-# Populate image data by looping over and filling slices
-i = 0
-while i < vert:
- if vert > 1:
- x = Phantom.subset(vertical=i).array
- else:
- x = Phantom.array
- x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
- x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94
- if vert > 1 :
- Phantom.fill(x, vertical=i)
- i += 1
-
-
-perc = 0.02
-# Set up empty image data
-noise = ImageData(numpy.random.normal(loc = 0.04 ,
- scale = perc ,
- size = Phantom.shape), geometry=ig,
- dimension_labels=['horizontal_x',
- 'horizontal_y',
- 'vertical'])
-Phantom += noise
-
-# Set up an AcquisitionGeometry object to hold the parameters of the measurement
-# setup geometry: the number of angles, the actual angles from 0 to
-# pi for parallel beam, the width of a detector
-# pixel relative to an object pixel, and the number of detector pixels.
-angles_num = 20
-det_w = 1.0
-det_num = N
-
-angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\
- 180/numpy.pi
-
-# Inputs: Geometry, 2D or 3D, angles, horz detector pixel count,
-# horz detector pixel size, vert detector pixel count,
-# vert detector pixel size.
-ag = AcquisitionGeometry('parallel',
- '3D',
- angles,
- N,
- det_w,
- vert,
- det_w)
-
-# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
-# wrapping calls to CCPi projector.
-A = CCPiProjectorSimple(ig, ag)
-
-# Forward projection and backprojection are available as the methods direct and
-# adjoint. Here we generate the test data b (noise was already added to the phantom).
-
-b = A.direct(Phantom)
-
-
-#z = A.adjoint(b)
-
-
-# Using the test data b, different reconstruction methods can now be set up as
-# demonstrated in the rest of this file. In general all methods need an initial
-# guess and some algorithm options to be set. Note that 100 iterations for
-# some of the methods is a very low number and 1000 or 10000 iterations may be
-# needed if one wants to obtain a converged solution.
-x_init = ImageData(geometry=ig,
- dimension_labels=['horizontal_x','horizontal_y','vertical'])
-X_init = BlockDataContainer(x_init)
-B = BlockDataContainer(b,
- ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
-
-# setup a tomo identity
-Ibig = 1e5 * TomoIdentity(geometry=ig)
-Ismall = 1e-5 * TomoIdentity(geometry=ig)
-Iok = 1e1 * TomoIdentity(geometry=ig)
-
-# composite operator
-Kbig = BlockOperator(A, Ibig, shape=(2,1))
-Ksmall = BlockOperator(A, Ismall, shape=(2,1))
-Kok = BlockOperator(A, Iok, shape=(2,1))
-
-#out = K.direct(X_init)
-
-f = Norm2sq(Kbig,B)
-f.L = 0.00003
-
-fsmall = Norm2sq(Ksmall,B)
-fsmall.L = 0.00003
-
-fok = Norm2sq(Kok,B)
-fok.L = 0.00003
-
-simplef = Norm2sq(A, b)
-simplef.L = 0.00003
-
-gd = GradientDescent( x_init=x_init, objective_function=simplef,
- rate=simplef.L)
-gd.max_iteration = 50
-
-Kbig.direct(X_init)
-Kbig.adjoint(B)
-cg = CGLS()
-cg.set_up(X_init, Kbig, B )
-cg.max_iteration = 10
-
-cgsmall = CGLS()
-cgsmall.set_up(X_init, Ksmall, B )
-cgsmall.max_iteration = 10
-
-
-cgs = CGLS()
-cgs.set_up(x_init, A, b )
-cgs.max_iteration = 10
-
-cgok = CGLS()
-cgok.set_up(X_init, Kok, B )
-cgok.max_iteration = 10
-# #
-#out.__isub__(B)
-#out2 = K.adjoint(out)
-
-#(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
-
-for _ in gd:
- print ("iteration {} {}".format(gd.iteration, gd.get_last_loss()))
-
-cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
-
-cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
-
-cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
-cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)))
-cgok.run(10, verbose=True)
-# # for _ in cg:
-# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss()))
-# #
-# # fig = plt.figure()
-# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
-# # plt.title('Composite CGLS')
-# # plt.show()
-# #
-# # for _ in cgs:
-# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss()))
-# #
-fig = plt.figure()
-plt.subplot(2,3,1)
-plt.imshow(Phantom.subset(vertical=0).as_array())
-plt.title('Simulated Phantom')
-plt.subplot(2,3,2)
-plt.imshow(gd.get_output().subset(vertical=0).as_array())
-plt.title('Simple Gradient Descent')
-plt.subplot(2,3,3)
-plt.imshow(cgs.get_output().subset(vertical=0).as_array())
-plt.title('Simple CGLS')
-plt.subplot(2,3,5)
-plt.imshow(cg.get_output().get_item(0).subset(vertical=0).as_array())
-plt.title('Composite CGLS\nbig lambda')
-plt.subplot(2,3,6)
-plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=0).as_array())
-plt.title('Composite CGLS\nsmall lambda')
-plt.subplot(2,3,4)
-plt.imshow(cgok.get_output().get_item(0).subset(vertical=0).as_array())
-plt.title('Composite CGLS\nok lambda')
-plt.show()
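
A note on the construction removed above: CGLS_tikhonov.py solves the Tikhonov-regularised problem by stacking the projector A and a scaled identity into a BlockOperator and running plain CGLS on the augmented system. A minimal NumPy sketch of that equivalence, with illustrative names and sizes (A, b, lam are stand-ins, not library objects):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 20))          # stand-in for the projector
b = rng.standard_normal(30)                # stand-in for the measured data
lam = 10.0                                 # Tikhonov weight (the "big lambda" above)

# Augmented least squares on [A; lam*I] x ~ [b; 0] ...
A_aug = np.vstack([A, lam * np.eye(20)])
b_aug = np.concatenate([b, np.zeros(20)])
x_aug, *_ = np.linalg.lstsq(A_aug, b_aug, rcond=None)

# ... equals the solution of the regularised normal equations (A^T A + lam^2 I) x = A^T b
x_normal = np.linalg.solve(A.T @ A + lam ** 2 * np.eye(20), A.T @ b)
print(np.allclose(x_aug, x_normal))        # True

Changing lam trades data fit against the size of x, which is what the big/ok/small lambda runs above compare.
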
diff --git a/Wrappers/Python/wip/CreatePhantom.py b/Wrappers/Python/wip/CreatePhantom.py
deleted file mode 100644
index 4bf6ea4..0000000
--- a/Wrappers/Python/wip/CreatePhantom.py
+++ /dev/null
@@ -1,242 +0,0 @@
-import numpy
-import tomophantom
-from tomophantom import TomoP3D
-from tomophantom.supp.artifacts import ArtifactsClass as Artifact
-import os
-
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import CGLS
-from ccpi.plugins.ops import CCPiProjectorSimple
-from ccpi.optimisation.ops import PowerMethodNonsquare
-from ccpi.optimisation.ops import TomoIdentity
-from ccpi.optimisation.funcs import Norm2sq, Norm1
-from ccpi.framework import ImageGeometry, AcquisitionGeometry, ImageData, AcquisitionData
-from ccpi.optimisation.algorithms import GradientDescent
-from ccpi.framework import BlockDataContainer
-from ccpi.optimisation.operators import BlockOperator
-
-
-model = 13 # select a model number from tomophantom library
-N_size = 64 # Define phantom dimensions using a scalar value (cubic phantom)
-path = os.path.dirname(tomophantom.__file__)
-path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
-
-#This will generate a N_size x N_size x N_size phantom (3D)
-phantom_tm = TomoP3D.Model(model, N_size, path_library3D)
-
-# detector column count (horizontal)
-detector_horiz = int(numpy.sqrt(2)*N_size)
-# detector row count (vertical) (no reason for it to be > N)
-detector_vert = N_size
-# number of projection angles
-angles_num = int(0.5*numpy.pi*N_size)
-# angles are expressed in degrees
-angles = numpy.linspace(0.0, 179.9, angles_num, dtype='float32')
-
-
-acquisition_data_array = TomoP3D.ModelSino(model, N_size,
- detector_horiz, detector_vert,
- angles,
- path_library3D)
-
-tomophantom_acquisition_axes_order = ['vertical', 'angle', 'horizontal']
-
-artifacts = Artifact(acquisition_data_array)
-
-
-tp_acq_data = AcquisitionData(artifacts.noise(0.2, 'Gaussian'),
- dimension_labels=tomophantom_acquisition_axes_order)
-#print ("size", acquisition_data.shape)
-print ("horiz", detector_horiz)
-print ("vert", detector_vert)
-print ("angles", angles_num)
-
-tp_acq_geometry = AcquisitionGeometry(geom_type='parallel', dimension='3D',
- angles=angles,
- pixel_num_h=detector_horiz,
- pixel_num_v=detector_vert,
- channels=1,
- )
-
-acq_data = tp_acq_geometry.allocate()
-#print (tp_acq_geometry)
-print ("AcquisitionData", acq_data.shape)
-print ("TomoPhantom", tp_acq_data.shape, tp_acq_data.dimension_labels)
-
-default_acquisition_axes_order = ['angle', 'vertical', 'horizontal']
-
-acq_data2 = tp_acq_data.subset(dimensions=default_acquisition_axes_order)
-print ("AcquisitionData", acq_data2.shape, acq_data2.dimension_labels)
-print ("AcquisitionData {} TomoPhantom {}".format(id(acq_data2.as_array()),
- id(acquisition_data_array)))
-
-fig = plt.figure()
-plt.subplot(1,2,1)
-plt.imshow(acquisition_data_array[20])
-plt.title('Sinogram')
-plt.subplot(1,2,2)
-plt.imshow(tp_acq_data.as_array()[20])
-plt.title('Sinogram + noise')
-plt.show()
-
-# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
-# wrapping calls to CCPi projector.
-
-ig = ImageGeometry(voxel_num_x=detector_horiz,
- voxel_num_y=detector_horiz,
- voxel_num_z=detector_vert)
-A = CCPiProjectorSimple(ig, tp_acq_geometry)
-# Forward and backprojection are available as methods direct and adjoint. Here
-# generate test data b and some noise
-
-#b = A.direct(Phantom)
-b = acq_data2
-
-#z = A.adjoint(b)
-
-
-# Using the test data b, different reconstruction methods can now be set up as
-# demonstrated in the rest of this file. In general all methods need an initial
-# guess and some algorithm options to be set. Note that 100 iterations for
-# some of the methods is a very low number and 1000 or 10000 iterations may be
-# needed if one wants to obtain a converged solution.
-x_init = ImageData(geometry=ig,
- dimension_labels=['horizontal_x','horizontal_y','vertical'])
-X_init = BlockDataContainer(x_init)
-B = BlockDataContainer(b,
- ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
-
-# setup a tomo identity
-Ibig = 4e1 * TomoIdentity(geometry=ig)
-Ismall = 1e-3 * TomoIdentity(geometry=ig)
-Iok = 7.6e0 * TomoIdentity(geometry=ig)
-
-# composite operator
-Kbig = BlockOperator(A, Ibig, shape=(2,1))
-Ksmall = BlockOperator(A, Ismall, shape=(2,1))
-Kok = BlockOperator(A, Iok, shape=(2,1))
-
-#out = K.direct(X_init)
-#x0 = x_init.copy()
-#x0.fill(numpy.random.randn(*x0.shape))
-#lipschitz = PowerMethodNonsquare(A, 5, x0)
-#print("lipschitz", lipschitz)
-
-#%%
-
-simplef = Norm2sq(A, b, memopt=False)
-#simplef.L = lipschitz[0]/3000.
-simplef.L = 0.00003
-
-f = Norm2sq(Kbig,B)
-f.L = 0.00003
-
-fsmall = Norm2sq(Ksmall,B)
-fsmall.L = 0.00003
-
-fok = Norm2sq(Kok,B)
-fok.L = 0.00003
-
-print("setup gradient descent")
-gd = GradientDescent( x_init=x_init, objective_function=simplef,
- rate=simplef.L)
-gd.max_iteration = 5
-simplef2 = Norm2sq(A, b, memopt=True)
-#simplef.L = lipschitz[0]/3000.
-simplef2.L = 0.00003
-print("setup gradient descent")
-gd2 = GradientDescent( x_init=x_init, objective_function=simplef2,
- rate=simplef2.L)
-gd2.max_iteration = 5
-
-Kbig.direct(X_init)
-Kbig.adjoint(B)
-print("setup CGLS")
-cg = CGLS()
-cg.set_up(X_init, Kbig, B )
-cg.max_iteration = 10
-
-print("setup CGLS")
-cgsmall = CGLS()
-cgsmall.set_up(X_init, Ksmall, B )
-cgsmall.max_iteration = 10
-
-
-print("setup CGLS")
-cgs = CGLS()
-cgs.set_up(x_init, A, b )
-cgs.max_iteration = 10
-
-print("setup CGLS")
-cgok = CGLS()
-cgok.set_up(X_init, Kok, B )
-cgok.max_iteration = 10
-# #
-#out.__isub__(B)
-#out2 = K.adjoint(out)
-
-#(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
-
-
-for _ in gd:
- print ("GradientDescent iteration {} {}".format(gd.iteration, gd.get_last_loss()))
-#gd2.run(5,verbose=True)
-print("CGLS block lambda big")
-cg.run(10, lambda it,val: print ("CGLS big iteration {} objective {}".format(it,val)))
-
-print("CGLS standard")
-cgs.run(10, lambda it,val: print ("CGLS standard iteration {} objective {}".format(it,val)))
-
-print("CGLS block lambda small")
-cgsmall.run(10, lambda it,val: print ("CGLS small iteration {} objective {}".format(it,val)))
-print("CGLS block lambdaok")
-cgok.run(10, verbose=True)
-# # for _ in cg:
-# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss()))
-# #
-# # fig = plt.figure()
-# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
-# # plt.title('Composite CGLS')
-# # plt.show()
-# #
-# # for _ in cgs:
-# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss()))
-# #
-Phantom = ImageData(phantom_tm)
-
-theslice=40
-
-fig = plt.figure()
-plt.subplot(2,3,1)
-plt.imshow(numpy.flip(Phantom.subset(vertical=theslice).as_array(),axis=0), cmap='gray')
-plt.clim(0,0.7)
-plt.title('Simulated Phantom')
-plt.subplot(2,3,2)
-plt.imshow(gd.get_output().subset(vertical=theslice).as_array(), cmap='gray')
-plt.clim(0,0.7)
-plt.title('Simple Gradient Descent')
-plt.subplot(2,3,3)
-plt.imshow(cgs.get_output().subset(vertical=theslice).as_array(), cmap='gray')
-plt.clim(0,0.7)
-plt.title('Simple CGLS')
-plt.subplot(2,3,5)
-plt.imshow(cg.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
-plt.clim(0,0.7)
-plt.title('Composite CGLS\nbig lambda')
-plt.subplot(2,3,6)
-plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
-plt.clim(0,0.7)
-plt.title('Composite CGLS\nsmall lambda')
-plt.subplot(2,3,4)
-plt.imshow(cgok.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
-plt.clim(0,0.7)
-plt.title('Composite CGLS\nok lambda')
-plt.show()
-
-
-#Ibig = 7e1 * TomoIdentity(geometry=ig)
-#Kbig = BlockOperator(A, Ibig, shape=(2,1))
-#cg2 = CGLS(x_init=X_init, operator=Kbig, data=B)
-#cg2.max_iteration = 10
-#cg2.run(10, verbose=True)
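
The GradientDescent run in CreatePhantom.py minimises the plain least-squares objective ||Ax - b||^2 with a hard-coded rate (the power-method estimate is left commented out). A common choice for that rate is 1/L with L = 2*||A||_2^2, the Lipschitz constant of the gradient; a small NumPy sketch with illustrative sizes:

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((40, 25))
b = rng.standard_normal(40)

L = 2 * np.linalg.norm(A, 2) ** 2            # Lipschitz constant of the gradient of ||Ax - b||^2
x = np.zeros(25)
for _ in range(200):
    grad = 2 * A.T @ (A @ x - b)             # gradient of the objective
    x -= (1.0 / L) * grad                    # fixed-rate descent step

print(np.linalg.norm(A.T @ (A @ x - b)))     # normal-equations residual shrinks towards zero
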
diff --git a/Wrappers/Python/wip/Demos/FISTA_vs_CGLS.py b/Wrappers/Python/wip/Demos/FISTA_vs_CGLS.py
deleted file mode 100644
index 2dcaa89..0000000
--- a/Wrappers/Python/wip/Demos/FISTA_vs_CGLS.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, AcquisitionData
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import FISTA, CGLS
-
-from ccpi.optimisation.operators import Gradient
-from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, FunctionOperatorComposition
-from skimage.util import random_noise
-from ccpi.astra.ops import AstraProjectorSimple
-
-#%%
-
-N = 75
-x = np.zeros((N,N))
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-data = ImageData(x)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-
-detectors = N
-angles = np.linspace(0, np.pi, N, dtype=np.float32)
-
-ag = AcquisitionGeometry('parallel','2D',angles, detectors)
-Aop = AstraProjectorSimple(ig, ag, 'cpu')
-sin = Aop.direct(data)
-
-noisy_data = sin
-
-fidelity = FunctionOperatorComposition(L2NormSquared(b=noisy_data), Aop)
-regularizer = ZeroFunction()
-
-x_init = ig.allocate()
-
-## Setup and run the FISTA algorithm
-opt = {'tol': 1e-4, 'memopt':True}
-fista = FISTA(x_init=x_init , f=fidelity, g=regularizer, opt=opt)
-fista.max_iteration = 500
-fista.update_objective_interval = 50
-fista.run(500, verbose=True)
-
-## Setup and run the CGLS algorithm
-cgls = CGLS(x_init=x_init, operator=Aop, data=noisy_data)
-cgls.max_iteration = 500
-cgls.update_objective_interval = 50
-cgls.run(500, verbose=True)
-
-diff = fista.get_output() - cgls.get_output()
-
-
-#%%
-print( diff.norm())
-
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(fista.get_output().as_array())
-plt.title('FISTA reconstruction')
-plt.colorbar()
-plt.subplot(3,1,2)
-plt.imshow(cgls.get_output().as_array())
-plt.title('CGLS reconstruction')
-plt.colorbar()
-plt.subplot(3,1,3)
-plt.imshow(diff.abs().as_array())
-plt.title('Difference reconstruction')
-plt.colorbar()
-plt.show()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
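
With a ZeroFunction regulariser, the FISTA run above is accelerated gradient descent on ||Ax - b||^2, so its output should agree with CGLS up to tolerance, which is what the difference image checks. A compact NumPy sketch of the FISTA update under that assumption (names and sizes are illustrative):

import numpy as np

rng = np.random.default_rng(2)
A = rng.standard_normal((50, 30))
b = rng.standard_normal(50)
L = 2 * np.linalg.norm(A, 2) ** 2

x = np.zeros(30); y = x.copy(); t = 1.0
for _ in range(300):
    x_new = y - (1.0 / L) * (2 * A.T @ (A @ y - b))   # gradient step at the extrapolated point
    t_new = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t * t))  # Nesterov momentum parameter
    y = x_new + ((t - 1.0) / t_new) * (x_new - x)     # extrapolation
    x, t = x_new, t_new

x_ls, *_ = np.linalg.lstsq(A, b, rcond=None)
print(np.linalg.norm(x - x_ls) / np.linalg.norm(x_ls))  # small relative difference
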
diff --git a/Wrappers/Python/wip/Demos/FISTA_vs_PDHG.py b/Wrappers/Python/wip/Demos/FISTA_vs_PDHG.py
deleted file mode 100644
index b7777ef..0000000
--- a/Wrappers/Python/wip/Demos/FISTA_vs_PDHG.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageData, ImageGeometry
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import FISTA, PDHG
-
-from ccpi.optimisation.operators import BlockOperator, Gradient, Identity
-from ccpi.optimisation.functions import L2NormSquared, L1Norm, \
- MixedL21Norm, FunctionOperatorComposition, BlockFunction, ZeroFunction
-
-from skimage.util import random_noise
-
-# Create phantom for denoising with an L1 data fit and a squared-gradient (Tikhonov) regulariser
-N = 100
-
-data = np.zeros((N,N))
-data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-data = ImageData(data)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-ag = ig
-
-# Create noisy data. Add salt-and-pepper noise
-n1 = random_noise(data.as_array(), mode = 's&p', salt_vs_pepper = 0.9, amount=0.2)
-noisy_data = ImageData(n1)
-
-# Regularisation Parameter
-alpha = 5
-
-operator = Gradient(ig)
-
-fidelity = L1Norm(b=noisy_data)
-regulariser = FunctionOperatorComposition(alpha * L2NormSquared(), operator)
-
-x_init = ig.allocate()
-
-## Setup and run the FISTA algorithm
-opt = {'tol': 1e-4, 'memopt':True}
-fista = FISTA(x_init=x_init , f=regulariser, g=fidelity, opt=opt)
-fista.max_iteration = 2000
-fista.update_objective_interval = 50
-fista.run(2000, verbose=True)
-
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(data.as_array())
-plt.title('Ground Truth')
-plt.colorbar()
-plt.subplot(3,1,2)
-plt.imshow(noisy_data.as_array())
-plt.title('Noisy Data')
-plt.colorbar()
-plt.subplot(3,1,3)
-plt.imshow(fista.get_output().as_array())
-plt.title('FISTA Reconstruction')
-plt.colorbar()
-plt.show()
-
-# Compare with PDHG
-# Create operators
-op1 = Gradient(ig)
-op2 = Identity(ig, ag)
-
-# Create BlockOperator
-operator = BlockOperator(op1, op2, shape=(2,1) )
-f = BlockFunction(alpha * L2NormSquared(), fidelity)
-g = ZeroFunction()
-
-## Compute operator Norm
-normK = operator.norm()
-#
-## Primal & dual stepsizes
-sigma = 1
-tau = 1/(sigma*normK**2)
-#
-#
-## Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 50
-pdhg.run(2000)
-#
-#%%
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(fista.get_output().as_array())
-plt.title('FISTA')
-plt.colorbar()
-plt.subplot(3,1,2)
-plt.imshow(pdhg.get_output().as_array())
-plt.title('PDHG')
-plt.colorbar()
-plt.subplot(3,1,3)
-plt.imshow(np.abs(pdhg.get_output().as_array()-fista.get_output().as_array()))
-plt.title('Diff FISTA-PDHG')
-plt.colorbar()
-plt.show()
-
-
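
Both demos above pick the PDHG step sizes from the rule sigma * tau * ||K||^2 <= 1, with ||K|| taken from operator.norm(). For a plain matrix that norm is the largest singular value, which a few power iterations estimate well; a hedged NumPy sketch (K is an illustrative stand-in for the BlockOperator):

import numpy as np

rng = np.random.default_rng(3)
K = rng.standard_normal((60, 40))

x = rng.standard_normal(40)
for _ in range(100):                      # power method on K^T K
    x = K.T @ (K @ x)
    x /= np.linalg.norm(x)
normK = np.linalg.norm(K @ x)             # ~ largest singular value of K

sigma = 1.0
tau = 1.0 / (sigma * normK ** 2)          # satisfies sigma * tau * ||K||^2 <= 1
print(normK, np.linalg.norm(K, 2))        # close to the exact 2-norm
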
diff --git a/Wrappers/Python/wip/Demos/IMAT_Reconstruction/TV_WhiteBeam_reconstruction.py b/Wrappers/Python/wip/Demos/IMAT_Reconstruction/TV_WhiteBeam_reconstruction.py
deleted file mode 100644
index e67bdb1..0000000
--- a/Wrappers/Python/wip/Demos/IMAT_Reconstruction/TV_WhiteBeam_reconstruction.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageGeometry, AcquisitionGeometry, AcquisitionData
-from astropy.io import fits
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG
-
-from ccpi.optimisation.operators import BlockOperator, Gradient
-from ccpi.optimisation.functions import ZeroFunction, KullbackLeibler, L2NormSquared,\
- MixedL21Norm, BlockFunction
-
-from ccpi.astra.ops import AstraProjectorSimple
-
-
-# load IMAT file
-#filename_sino = '/media/newhd/shared/DataProcessed/IMAT_beamtime_Feb_2019/preprocessed_test_flat/sino/rebin_slice_350/sino_log_rebin_141.fits'
-filename_sino = '/media/newhd/shared/DataProcessed/IMAT_beamtime_Feb_2019/preprocessed_test_flat/sino/rebin_slice_350/sino_log_rebin_564.fits'
-
-sino_handler = fits.open(filename_sino)
-sino_tmp = numpy.array(sino_handler[0].data, dtype=float)
-# reorder sino coordinate: channels, angles, detectors
-sinogram = numpy.rollaxis(sino_tmp, 2)
-sino_handler.close()
-#%%
-# white beam data
-sinogram_wb = sinogram.sum(axis=0)
-
-pixh = sinogram_wb.shape[1] # detectors
-pixv = sinogram_wb.shape[1] # detectors
-
-# WhiteBeam Geometry
-igWB = ImageGeometry(voxel_num_x = pixh, voxel_num_y = pixv)
-
-# Load Golden angles
-with open("golden_angles.txt") as f:
- angles_string = [line.rstrip() for line in f]
- angles = numpy.array(angles_string).astype(float)
-agWB = AcquisitionGeometry('parallel', '2D', angles * numpy.pi / 180, pixh)
-op_WB = AstraProjectorSimple(igWB, agWB, 'gpu')
-sinogram_aqdata = AcquisitionData(sinogram_wb, agWB)
-
-# BackProjection
-result_bp = op_WB.adjoint(sinogram_aqdata)
-
-plt.imshow(result_bp.subset(channel=50).array)
-plt.title('BackProjection')
-plt.show()
-
-
-
-#%%
-
-# Regularisation Parameter
-alpha = 2000
-
-# Create operators
-op1 = Gradient(igWB)
-op2 = op_WB
-
-# Create BlockOperator
-operator = BlockOperator(op1, op2, shape=(2,1) )
-
-# Create functions
-
-f1 = alpha * MixedL21Norm()
-f2 = KullbackLeibler(sinogram_aqdata)
-#f2 = L2NormSquared(b = sinogram_aqdata)
-f = BlockFunction(f1, f2)
-
-g = ZeroFunction()
-
-diag_precon = False
-
-if diag_precon:
-
- def tau_sigma_precond(operator):
-
- tau = 1/operator.sum_abs_row()
- sigma = 1/ operator.sum_abs_col()
-
- return tau, sigma
-
- tau, sigma = tau_sigma_precond(operator)
-
-else:
- # Compute operator Norm
- normK = operator.norm()
- print ("normK", normK)
- # Primal & dual stepsizes
- sigma = 0.1
- tau = 1/(sigma*normK**2)
-
-#%%
-
-
-## Primal & dual stepsizes
-#sigma = 0.1
-#tau = 1/(sigma*normK**2)
-#
-#
-## Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 10000
-pdhg.update_objective_interval = 500
-
-def circ_mask(h, w, center=None, radius=None):
-
- if center is None: # use the middle of the image
- center = [int(w/2), int(h/2)]
- if radius is None: # use the smallest distance between the center and image walls
- radius = min(center[0], center[1], w-center[0], h-center[1])
-
- Y, X = numpy.ogrid[:h, :w]
- dist_from_center = numpy.sqrt((X - center[0])**2 + (Y-center[1])**2)
-
- mask = dist_from_center <= radius
- return mask
-
-def show_result(niter, objective, solution):
-
- mask = circ_mask(pixh, pixv, center=None, radius = 220) # 55 with 141,
- plt.imshow(solution.as_array() * mask)
- plt.colorbar()
- plt.title("Iter: {}".format(niter))
- plt.show()
-
-
- print( "{:04}/{:04} {:<5} {:.4f} {:<5} {:.4f} {:<5} {:.4f}".\
- format(niter, pdhg.max_iteration,'', \
- objective[0],'',\
- objective[1],'',\
- objective[2]))
-
-pdhg.run(10000, callback = show_result)
-
-#%%
-
-mask = circ_mask(pixh, pixv, center=None, radius = 210) # 55 with 141,
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(pdhg.get_output().as_array() * mask)
-plt.title('PDHG Reconstruction')
-plt.colorbar()
-plt.show()
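
The diag_precon branch above corresponds to the Pock-Chambolle diagonally preconditioned PDHG, where each dual step size comes from a row sum of |K| and each primal step size from a column sum. The removed demos wire sum_abs_row/sum_abs_col to tau and sigma in different orders, so the mapping below is shown only for a plain matrix and should be treated as an assumption about the library calls:

import numpy as np

rng = np.random.default_rng(4)
K = np.abs(rng.standard_normal((6, 4)))   # illustrative stand-in for the block operator

sigma = 1.0 / np.sum(np.abs(K), axis=1)   # one dual step size per row of K
tau   = 1.0 / np.sum(np.abs(K), axis=0)   # one primal step size per column of K
print(sigma.shape, tau.shape)             # (6,) dual sizes, (4,) primal sizes

Zero rows or columns would give infinite entries, which the DiagPrecond demo further down zeroes out explicitly.
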
diff --git a/Wrappers/Python/wip/Demos/IMAT_Reconstruction/golden_angles.txt b/Wrappers/Python/wip/Demos/IMAT_Reconstruction/golden_angles.txt
deleted file mode 100644
index 95ce73a..0000000
--- a/Wrappers/Python/wip/Demos/IMAT_Reconstruction/golden_angles.txt
+++ /dev/null
@@ -1,186 +0,0 @@
-0
-0.9045
-1.809
-2.368
-3.2725
-4.736
-5.6405
-6.1995
-7.104
-8.5675
-9.472
-10.9356
-11.8401
-12.3991
-13.3036
-14.7671
-15.6716
-16.2306
-17.1351
-18.0396
-18.5986
-19.5031
-20.9666
-21.8711
-22.4301
-23.3346
-24.7981
-25.7026
-27.1661
-28.0706
-28.6297
-29.5342
-30.9977
-31.9022
-32.4612
-33.3657
-34.8292
-35.7337
-37.1972
-38.1017
-38.6607
-39.5652
-41.0287
-41.9332
-42.4922
-43.3967
-44.3012
-44.8602
-45.7647
-47.2283
-48.1328
-48.6918
-49.5963
-51.0598
-51.9643
-53.4278
-54.3323
-54.8913
-55.7958
-57.2593
-58.1638
-58.7228
-59.6273
-60.5318
-61.0908
-61.9953
-63.4588
-64.3633
-64.9224
-65.8269
-67.2904
-68.1949
-69.6584
-70.5629
-71.1219
-72.0264
-73.4899
-74.3944
-74.9534
-75.8579
-77.3214
-78.2259
-79.6894
-80.5939
-81.1529
-82.0574
-83.521
-84.4255
-84.9845
-85.889
-86.7935
-87.3525
-88.257
-89.7205
-90.625
-91.184
-92.0885
-93.552
-94.4565
-95.92
-96.8245
-97.3835
-98.288
-99.7516
-100.656
-101.215
-102.12
-103.583
-104.488
-105.951
-106.856
-107.415
-108.319
-109.783
-110.687
-111.246
-112.151
-113.055
-113.614
-114.519
-115.982
-116.887
-117.446
-118.35
-119.814
-120.718
-122.182
-123.086
-123.645
-124.55
-126.013
-126.918
-127.477
-128.381
-129.286
-129.845
-130.749
-132.213
-133.117
-133.676
-134.581
-136.044
-136.949
-138.412
-139.317
-139.876
-140.78
-142.244
-143.148
-143.707
-144.612
-146.075
-146.98
-148.443
-149.348
-149.907
-150.811
-152.275
-153.179
-153.738
-154.643
-155.547
-156.106
-157.011
-158.474
-159.379
-159.938
-160.842
-162.306
-163.21
-164.674
-165.578
-166.137
-167.042
-168.505
-169.41
-169.969
-170.873
-172.337
-173.242
-174.705
-175.609
-176.168
-177.073
-178.536
-179.441
diff --git a/Wrappers/Python/wip/Demos/LeastSq_CGLS_FISTA_PDHG.py b/Wrappers/Python/wip/Demos/LeastSq_CGLS_FISTA_PDHG.py
deleted file mode 100644
index 97c71ba..0000000
--- a/Wrappers/Python/wip/Demos/LeastSq_CGLS_FISTA_PDHG.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG, CGLS, FISTA
-
-from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, FunctionOperatorComposition
-from ccpi.astra.ops import AstraProjectorSimple
-
-#%%
-
-N = 68
-x = np.zeros((N,N))
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-data = ImageData(x)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-
-detectors = N
-angles = np.linspace(0, np.pi, N, dtype=np.float32)
-
-ag = AcquisitionGeometry('parallel','2D',angles, detectors)
-Aop = AstraProjectorSimple(ig, ag, 'cpu')
-sin = Aop.direct(data)
-
-noisy_data = sin
-
-
-#%%
-###############################################################################
-## Setup and run the CGLS algorithm
-
-x_init = ig.allocate()
-cgls = CGLS(x_init=x_init, operator=Aop, data=noisy_data)
-cgls.max_iteration = 500
-cgls.update_objective_interval = 50
-cgls.run(500, verbose=True)
-
-#%%
-plt.imshow(cgls.get_output().as_array())
-#%%
-###############################################################################
-## Setup and run the PDHG algorithm
-
-operator = Aop
-f = L2NormSquared(b = noisy_data)
-g = ZeroFunction()
-
-## Compute operator Norm
-normK = operator.norm()
-
-## Primal & dual stepsizes
-sigma = 0.1
-tau = 1/(sigma*normK**2)
-
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 50
-pdhg.run(2000)
-
-
-#%%
-###############################################################################
-## Setup and run the FISTA algorithm
-
-fidelity = FunctionOperatorComposition(L2NormSquared(b=noisy_data), Aop)
-regularizer = ZeroFunction()
-
-## Setup and run the FISTA algorithm
-opt = {'memopt':True}
-fista = FISTA(x_init=x_init , f=fidelity, g=regularizer, opt=opt)
-fista.max_iteration = 2000
-fista.update_objective_interval = 200
-fista.run(2000, verbose=True)
-
-#%% Show results
-
-diff1 = pdhg.get_output() - cgls.get_output()
-diff2 = fista.get_output() - cgls.get_output()
-
-print( diff1.norm())
-print( diff2.norm())
-
-plt.figure(figsize=(10,10))
-plt.subplot(2,3,1)
-plt.imshow(cgls.get_output().as_array())
-plt.title('CGLS reconstruction')
-plt.subplot(2,3,2)
-plt.imshow(pdhg.get_output().as_array())
-plt.title('PDHG reconstruction')
-plt.subplot(2,3,3)
-plt.imshow(fista.get_output().as_array())
-plt.title('FISTA reconstruction')
-plt.subplot(2,3,4)
-plt.imshow(diff1.abs().as_array())
-plt.title('Diff PDHG vs CGLS')
-plt.colorbar()
-plt.subplot(2,3,5)
-plt.imshow(diff2.abs().as_array())
-plt.title('Diff FISTA vs CGLS')
-plt.colorbar()
-plt.show()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
-#
-#
-#
-#
-#
-#
-#
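
The three solvers above target the same unregularised least-squares problem, so besides the pairwise difference images a solver-agnostic check is the normal-equations residual ||A^T (A x - b)||, which should be close to zero for a converged output. A tiny NumPy illustration with stand-in names:

import numpy as np

rng = np.random.default_rng(5)
A = rng.standard_normal((45, 20))
b = rng.standard_normal(45)

x_hat, *_ = np.linalg.lstsq(A, b, rcond=None)     # stand-in for any of the solvers' outputs
print(np.linalg.norm(A.T @ (A @ x_hat - b)))      # ~0 at the least-squares optimum
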
diff --git a/Wrappers/Python/wip/Demos/PDHG_TGV_Denoising_SaltPepper.py b/Wrappers/Python/wip/Demos/PDHG_TGV_Denoising_SaltPepper.py
deleted file mode 100644
index 7b65c31..0000000
--- a/Wrappers/Python/wip/Demos/PDHG_TGV_Denoising_SaltPepper.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Feb 22 14:53:03 2019
-
-@author: evangelos
-"""
-
-from ccpi.framework import ImageData, ImageGeometry
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG
-
-from ccpi.optimisation.operators import BlockOperator, Identity, \
- Gradient, SymmetrizedGradient, ZeroOperator
-from ccpi.optimisation.functions import ZeroFunction, L1Norm, \
- MixedL21Norm, BlockFunction
-
-from skimage.util import random_noise
-
-# Create phantom for TGV SaltPepper denoising
-
-N = 100
-
-data = np.zeros((N,N))
-
-x1 = np.linspace(0, int(N/2), N)
-x2 = np.linspace(int(N/2), 0., N)
-xv, yv = np.meshgrid(x1, x2)
-
-xv[int(N/4):int(3*N/4)-1, int(N/4):int(3*N/4)-1] = yv[int(N/4):int(3*N/4)-1, int(N/4):int(3*N/4)-1].T
-
-data = xv
-data = ImageData(data/data.max())
-
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-ag = ig
-
-# Create noisy data. Add salt-and-pepper noise
-n1 = random_noise(data.as_array(), mode = 's&p', salt_vs_pepper = 0.9, amount=0.2)
-noisy_data = ImageData(n1)
-
-# Regularisation Parameters
-alpha = 0.8
-beta = numpy.sqrt(2)* alpha
-
-method = '1'
-
-if method == '0':
-
- # Create operators
- op11 = Gradient(ig)
- op12 = Identity(op11.range_geometry())
-
- op22 = SymmetrizedGradient(op11.domain_geometry())
- op21 = ZeroOperator(ig, op22.range_geometry())
-
- op31 = Identity(ig, ag)
- op32 = ZeroOperator(op22.domain_geometry(), ag)
-
- operator = BlockOperator(op11, -1*op12, op21, op22, op31, op32, shape=(3,2) )
-
- f1 = alpha * MixedL21Norm()
- f2 = beta * MixedL21Norm()
- f3 = L1Norm(b=noisy_data)
- f = BlockFunction(f1, f2, f3)
- g = ZeroFunction()
-
-else:
-
- # Create operators
- op11 = Gradient(ig)
- op12 = Identity(op11.range_geometry())
- op22 = SymmetrizedGradient(op11.domain_geometry())
- op21 = ZeroOperator(ig, op22.range_geometry())
-
- operator = BlockOperator(op11, -1*op12, op21, op22, shape=(2,2) )
-
- f1 = alpha * MixedL21Norm()
- f2 = beta * MixedL21Norm()
-
- f = BlockFunction(f1, f2)
- g = BlockFunction(L1Norm(b=noisy_data), ZeroFunction())
-
-## Compute operator Norm
-normK = operator.norm()
-#
-# Primal & dual stepsizes
-sigma = 1
-tau = 1/(sigma*normK**2)
-
-
-# Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 50
-pdhg.run(2000)
-
-#%%
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(data.as_array())
-plt.title('Ground Truth')
-plt.colorbar()
-plt.subplot(3,1,2)
-plt.imshow(noisy_data.as_array())
-plt.title('Noisy Data')
-plt.colorbar()
-plt.subplot(3,1,3)
-plt.imshow(pdhg.get_output()[0].as_array())
-plt.title('TGV Reconstruction')
-plt.colorbar()
-plt.show()
-##
-plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth')
-plt.plot(np.linspace(0,N,N), pdhg.get_output()[0].as_array()[int(N/2),:], label = 'TGV reconstruction')
-plt.legend()
-plt.title('Middle Line Profiles')
-plt.show()
-
-
-#%% Check with CVX solution
-
-from ccpi.optimisation.operators import SparseFiniteDiff
-
-try:
- from cvxpy import *
- cvx_not_installable = True
-except ImportError:
- cvx_not_installable = False
-
-if cvx_not_installable:
-
- u = Variable(ig.shape)
- w1 = Variable((N, N))
- w2 = Variable((N, N))
-
- # create TGV regulariser
- DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
- DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
-
- regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u) - vec(w1), \
- DY.matrix() * vec(u) - vec(w2)]), 2, axis = 0)) + \
- beta * sum(norm(vstack([ DX.matrix().transpose() * vec(w1), DY.matrix().transpose() * vec(w2), \
- 0.5 * ( DX.matrix().transpose() * vec(w2) + DY.matrix().transpose() * vec(w1) ), \
- 0.5 * ( DX.matrix().transpose() * vec(w2) + DY.matrix().transpose() * vec(w1) ) ]), 2, axis = 0 ) )
-
- constraints = []
- fidelity = pnorm(u - noisy_data.as_array(),1)
- solver = MOSEK
-
- # choose solver
- if 'MOSEK' in installed_solvers():
- solver = MOSEK
- else:
- solver = SCS
-
- obj = Minimize( regulariser + fidelity)
- prob = Problem(obj)
- result = prob.solve(verbose = True, solver = solver)
-
- diff_cvx = numpy.abs( pdhg.get_output()[0].as_array() - u.value )
-
- plt.figure(figsize=(15,15))
- plt.subplot(3,1,1)
- plt.imshow(pdhg.get_output()[0].as_array())
- plt.title('PDHG solution')
- plt.colorbar()
- plt.subplot(3,1,2)
- plt.imshow(u.value)
- plt.title('CVX solution')
- plt.colorbar()
- plt.subplot(3,1,3)
- plt.imshow(diff_cvx)
- plt.title('Difference')
- plt.colorbar()
- plt.show()
-
- plt.plot(np.linspace(0,N,N), pdhg.get_output()[0].as_array()[int(N/2),:], label = 'PDHG')
- plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX')
- plt.legend()
- plt.title('Middle Line Profiles')
- plt.show()
-
- print('Primal Objective (CVX) {} '.format(obj.value))
- print('Primal Objective (PDHG) {} '.format(pdhg.objective[-1][0]))
-
-
-
-
-
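
Written out, the block set-up above (method '1') is the second-order TGV denoising problem with an L1 data fit, where E denotes the symmetrised gradient and g the noisy image; in the notation used by the other docstrings:

    min_{u,w} \alpha * ||\nabla u - w||_{2,1} + \beta * ||E w||_{2,1} + || u - g ||_{1}

with \beta = sqrt(2) * \alpha as set in the script.
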
diff --git a/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_Gaussian_DiagPrecond.py b/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_Gaussian_DiagPrecond.py
deleted file mode 100644
index d65478c..0000000
--- a/Wrappers/Python/wip/Demos/PDHG_TV_Denoising_Gaussian_DiagPrecond.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Total Variation Denoising using PDHG algorithm
-
-Problem: min_x \alpha * ||\nabla x||_{1} + (1/2) * || x - g ||_{2}^{2}
-
- \nabla: Gradient operator
- g: Noisy Data with Gaussian Noise
- \alpha: Regularization parameter
-
-"""
-
-from ccpi.framework import ImageData, ImageGeometry
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG
-
-from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
-from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
- MixedL21Norm, BlockFunction
-
-
-# Create phantom for TV Gaussian denoising
-N = 400
-
-data = np.zeros((N,N))
-data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-data = ImageData(data)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-ag = ig
-
-# Create noisy data. Add Gaussian noise
-np.random.seed(10)
-noisy_data = ImageData( data.as_array() + np.random.normal(0, 0.05, size=ig.shape) )
-
-# Regularisation Parameter
-alpha = 2
-
-method = '1'
-
-if method == '0':
-
- # Create operators
- op1 = Gradient(ig)
- op2 = Identity(ig, ag)
-
- # Create BlockOperator
- operator = BlockOperator(op1, op2, shape=(2,1) )
-
- # Create functions
-
- f1 = alpha * MixedL21Norm()
- f2 = 0.5 * L2NormSquared(b = noisy_data)
- f = BlockFunction(f1, f2)
-
- g = ZeroFunction()
-
-else:
-
- # Without the "Block Framework"
- operator = Gradient(ig)
- f = alpha * MixedL21Norm()
- g = 0.5 * L2NormSquared(b = noisy_data)
-
-
-diag_precon = False
-
-
-if diag_precon:
-
- def tau_sigma_precond(operator):
-
- tau = 1/operator.sum_abs_col()
- sigma = 1/operator.sum_abs_row()
-
- sigma[0].as_array()[sigma[0].as_array()==np.inf]=0
- sigma[1].as_array()[sigma[1].as_array()==np.inf]=0
-
- return tau, sigma
-
- tau, sigma = tau_sigma_precond(operator)
-
-else:
- # Compute operator Norm
- normK = operator.norm()
-
- # Primal & dual stepsizes
- sigma = 1
- tau = 1/(sigma*normK**2)
-
-
-# Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 3000
-pdhg.update_objective_interval = 200
-pdhg.run(3000, verbose=False)
-
-#%%
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(data.as_array())
-plt.title('Ground Truth')
-plt.colorbar()
-plt.subplot(3,1,2)
-plt.imshow(noisy_data.as_array())
-plt.title('Noisy Data')
-plt.colorbar()
-plt.subplot(3,1,3)
-plt.imshow(pdhg.get_output().as_array())
-plt.title('TV Reconstruction')
-plt.colorbar()
-plt.show()
-##
-plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth')
-plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'TV reconstruction')
-plt.legend()
-plt.title('Middle Line Profiles')
-plt.show()
-
-
-#%% Check with CVX solution
-
-from ccpi.optimisation.operators import SparseFiniteDiff
-
-try:
- from cvxpy import *
- cvx_not_installable = True
-except ImportError:
- cvx_not_installable = False
-
-
-if cvx_not_installable:
-
- ##Construct problem
- u = Variable(ig.shape)
-
- DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
- DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
-
- # Define Total Variation as a regulariser
- regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
- fidelity = 0.5 * sum_squares(u - noisy_data.as_array())
-
- # choose solver
- if 'MOSEK' in installed_solvers():
- solver = MOSEK
- else:
- solver = SCS
-
- obj = Minimize( regulariser + fidelity)
- prob = Problem(obj)
-    result = prob.solve(verbose = True, solver = solver)
-
- diff_cvx = numpy.abs( pdhg.get_output().as_array() - u.value )
-
- plt.figure(figsize=(15,15))
- plt.subplot(3,1,1)
- plt.imshow(pdhg.get_output().as_array())
- plt.title('PDHG solution')
- plt.colorbar()
- plt.subplot(3,1,2)
- plt.imshow(u.value)
- plt.title('CVX solution')
- plt.colorbar()
- plt.subplot(3,1,3)
- plt.imshow(diff_cvx)
- plt.title('Difference')
- plt.colorbar()
- plt.show()
-
- plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'PDHG')
- plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX')
- plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'Truth')
-
- plt.legend()
- plt.title('Middle Line Profiles')
- plt.show()
-
- print('Primal Objective (CVX) {} '.format(obj.value))
- print('Primal Objective (PDHG) {} '.format(pdhg.objective[-1][0]))
-
-
-
-
-
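
The regulariser in the problem above is the isotropic total variation, the mixed 2,1-norm of a forward-difference gradient, which is also what the SparseFiniteDiff-based CVX check assembles. A small NumPy sketch of evaluating that functional on an image, assuming Neumann (replicate) boundaries; names are illustrative:

import numpy as np

def tv_value(u, alpha=1.0):
    # forward differences with Neumann (replicate) boundary conditions
    dx = np.diff(u, axis=1, append=u[:, -1:])
    dy = np.diff(u, axis=0, append=u[-1:, :])
    return alpha * np.sum(np.sqrt(dx ** 2 + dy ** 2))

u = np.zeros((8, 8)); u[2:6, 2:6] = 1.0
print(tv_value(u))    # roughly the perimeter of the inner square
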
diff --git a/Wrappers/Python/wip/Demos/PDHG_TV_Tomo2D.py b/Wrappers/Python/wip/Demos/PDHG_TV_Tomo2D.py
deleted file mode 100644
index 87d5328..0000000
--- a/Wrappers/Python/wip/Demos/PDHG_TV_Tomo2D.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, AcquisitionData
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG
-
-from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
-from ccpi.optimisation.functions import ZeroFunction, KullbackLeibler, \
- MixedL21Norm, BlockFunction
-
-from ccpi.astra.ops import AstraProjectorSimple
-
-"""
-
-Total Variation regularised 2D tomography reconstruction using the PDHG algorithm:
-
- min_{x} max_{y} < K x, y > + g(x) - f^{*}(y)
-
-
-Problem: min_{x>0} \alpha * ||\nabla x||_{1} + \int ( A x - g * log(A x + \eta) )
-
- \nabla: Gradient operator
-
- A: Projection Matrix
- g: Noisy sinogram corrupted with Poisson Noise
-
- \eta: Background Noise
- \alpha: Regularization parameter
-
-"""
-
-# Create phantom for TV 2D tomography
-N = 75
-x = np.zeros((N,N))
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-data = ImageData(x)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-
-detectors = N
-angles = np.linspace(0, np.pi, N, dtype=np.float32)
-
-ag = AcquisitionGeometry('parallel','2D',angles, detectors)
-Aop = AstraProjectorSimple(ig, ag, 'cpu')
-sin = Aop.direct(data)
-
-# Create noisy data. Apply Poisson noise
-scale = 2
-n1 = scale * np.random.poisson(sin.as_array()/scale)
-noisy_data = AcquisitionData(n1, ag)
-
-# Regularisation Parameter
-alpha = 5
-
-# Create operators
-op1 = Gradient(ig)
-op2 = Aop
-
-# Create BlockOperator
-operator = BlockOperator(op1, op2, shape=(2,1) )
-
-# Create functions
-
-f1 = alpha * MixedL21Norm()
-f2 = KullbackLeibler(noisy_data)
-f = BlockFunction(f1, f2)
-
-g = ZeroFunction()
-
-diag_precon = True
-
-if diag_precon:
-
- def tau_sigma_precond(operator):
-
- tau = 1/operator.sum_abs_row()
- sigma = 1/ operator.sum_abs_col()
-
- return tau, sigma
-
- tau, sigma = tau_sigma_precond(operator)
-
-else:
- # Compute operator Norm
- normK = operator.norm()
- # Primal & dual stepsizes
- sigma = 10
- tau = 1/(sigma*normK**2)
-
-
-# Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 50
-pdhg.run(2000)
-
-
-#%%
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(data.as_array())
-plt.title('Ground Truth')
-plt.colorbar()
-plt.subplot(3,1,2)
-plt.imshow(noisy_data.as_array())
-plt.title('Noisy Data')
-plt.colorbar()
-plt.subplot(3,1,3)
-plt.imshow(pdhg.get_output().as_array())
-plt.title('TV Reconstruction')
-plt.colorbar()
-plt.show()
-##
-plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth')
-plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'TV reconstruction')
-plt.legend()
-plt.title('Middle Line Profiles')
-plt.show()
-
-
-#%% Check with CVX solution
-
-from ccpi.optimisation.operators import SparseFiniteDiff
-import astra
-import numpy
-
-try:
- from cvxpy import *
- cvx_not_installable = True
-except ImportError:
- cvx_not_installable = False
-
-
-if cvx_not_installable:
-
-
- ##Construct problem
- u = Variable(N*N)
- #q = Variable()
-
- DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
- DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
-
- regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
-
- # create matrix representation for Astra operator
-
- vol_geom = astra.create_vol_geom(N, N)
- proj_geom = astra.create_proj_geom('parallel', 1.0, detectors, angles)
-
- proj_id = astra.create_projector('strip', proj_geom, vol_geom)
-
- matrix_id = astra.projector.matrix(proj_id)
-
- ProjMat = astra.matrix.get(matrix_id)
-
- fidelity = sum( ProjMat * u - noisy_data.as_array().ravel() * log(ProjMat * u))
- #constraints = [q>= fidelity, u>=0]
- constraints = [u>=0]
-
- solver = SCS
- obj = Minimize( regulariser + fidelity)
- prob = Problem(obj, constraints)
- result = prob.solve(verbose = True, solver = solver)
-
-
-##%% Check with CVX solution
-
-from ccpi.optimisation.operators import SparseFiniteDiff
-
-try:
- from cvxpy import *
- cvx_not_installable = True
-except ImportError:
- cvx_not_installable = False
-
-
-if cvx_not_installable:
-
- ##Construct problem
- u = Variable(ig.shape)
-
- DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
- DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
-
- # Define Total Variation as a regulariser
- regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
- fidelity = pnorm( u - noisy_data.as_array(),1)
-
- # choose solver
- if 'MOSEK' in installed_solvers():
- solver = MOSEK
- else:
- solver = SCS
-
- obj = Minimize( regulariser + fidelity)
- prob = Problem(obj)
- result = prob.solve(verbose = True, solver = solver)
-
-    diff_cvx = numpy.abs( pdhg.get_output().as_array() - u.value )
- plt.figure(figsize=(15,15))
- plt.subplot(3,1,1)
- plt.imshow(pdhg.get_output().as_array())
- plt.title('PDHG solution')
- plt.colorbar()
- plt.subplot(3,1,2)
- plt.imshow(np.reshape(u.value, (N, N)))
- plt.title('CVX solution')
- plt.colorbar()
- plt.subplot(3,1,3)
- plt.imshow(diff_cvx)
- plt.title('Difference')
- plt.colorbar()
- plt.show()
-
- plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'PDHG')
- plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX')
- plt.legend()
- plt.title('Middle Line Profiles')
- plt.show()
-
- print('Primal Objective (CVX) {} '.format(obj.value))
- print('Primal Objective (PDHG) {} '.format(pdhg.objective[-1][0])) \ No newline at end of file
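
The Poisson-noise tomography demos pair the TV term with a Kullback-Leibler data fit which, up to terms constant in x, reads sum(A x - g * log(A x + eta)). A minimal NumPy evaluation of that expression; all names are illustrative stand-ins and eta only guards the logarithm:

import numpy as np

rng = np.random.default_rng(6)
A = np.abs(rng.standard_normal((30, 16)))      # nonnegative stand-in for the projector
x = np.abs(rng.standard_normal(16))            # nonnegative image estimate
g = rng.poisson(A @ x)                         # Poisson-noisy "sinogram"
eta = 1e-6                                     # small background term

Ax = A @ x
print(np.sum(Ax - g * np.log(Ax + eta)))       # KL data term, up to a constant in x
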
diff --git a/Wrappers/Python/wip/Demos/PDHG_TV_Tomo2D_time.py b/Wrappers/Python/wip/Demos/PDHG_TV_Tomo2D_time.py
deleted file mode 100644
index 045458a..0000000
--- a/Wrappers/Python/wip/Demos/PDHG_TV_Tomo2D_time.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, AcquisitionData
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG
-
-from ccpi.optimisation.operators import BlockOperator, Gradient
-from ccpi.optimisation.functions import ZeroFunction, KullbackLeibler, \
- MixedL21Norm, BlockFunction
-
-from ccpi.astra.ops import AstraProjectorMC
-
-import os
-import tomophantom
-from tomophantom import TomoP2D
-
-# Create phantom for TV 2D dynamic tomography
-
-model = 102 # note that the selected model is temporal (2D + time)
-N = 50 # set dimension of the phantom
-# one can specify an exact path to the parameters file
-# path_library2D = '../../../PhantomLibrary/models/Phantom2DLibrary.dat'
-path = os.path.dirname(tomophantom.__file__)
-path_library2D = os.path.join(path, "Phantom2DLibrary.dat")
-#This will generate a N_size x N_size x Time frames phantom (2D + time)
-phantom_2Dt = TomoP2D.ModelTemporal(model, N, path_library2D)
-
-plt.close('all')
-plt.figure(1)
-plt.rcParams.update({'font.size': 21})
-plt.title('{}''{}'.format('2D+t phantom using model no.',model))
-for sl in range(0,np.shape(phantom_2Dt)[0]):
- im = phantom_2Dt[sl,:,:]
- plt.imshow(im, vmin=0, vmax=1)
- plt.pause(.1)
-    plt.draw()
-
-
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N, channels = np.shape(phantom_2Dt)[0])
-data = ImageData(phantom_2Dt, geometry=ig)
-
-detectors = N
-angles = np.linspace(0,np.pi,N)
-
-ag = AcquisitionGeometry('parallel','2D', angles, detectors, channels = np.shape(phantom_2Dt)[0])
-Aop = AstraProjectorMC(ig, ag, 'gpu')
-sin = Aop.direct(data)
-
-scale = 2
-n1 = scale * np.random.poisson(sin.as_array()/scale)
-noisy_data = AcquisitionData(n1, ag)
-
-tindex = [3, 6, 10]
-
-fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(10, 10))
-plt.subplot(1,3,1)
-plt.imshow(noisy_data.as_array()[tindex[0],:,:])
-plt.axis('off')
-plt.title('Time {}'.format(tindex[0]))
-plt.subplot(1,3,2)
-plt.imshow(noisy_data.as_array()[tindex[1],:,:])
-plt.axis('off')
-plt.title('Time {}'.format(tindex[1]))
-plt.subplot(1,3,3)
-plt.imshow(noisy_data.as_array()[tindex[2],:,:])
-plt.axis('off')
-plt.title('Time {}'.format(tindex[2]))
-
-fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
- wspace=0.02, hspace=0.02)
-
-plt.show()
-
-#%%
-# Regularisation Parameter
-alpha = 5
-
-# Create operators
-#op1 = Gradient(ig)
-op1 = Gradient(ig, correlation='SpaceChannels')
-op2 = Aop
-
-# Create BlockOperator
-operator = BlockOperator(op1, op2, shape=(2,1) )
-
-# Create functions
-
-f1 = alpha * MixedL21Norm()
-f2 = KullbackLeibler(noisy_data)
-f = BlockFunction(f1, f2)
-
-g = ZeroFunction()
-
-# Compute operator Norm
-normK = operator.norm()
-
-# Primal & dual stepsizes
-sigma = 1
-tau = 1/(sigma*normK**2)
-
-
-# Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 200
-pdhg.run(2000)
-
-
-#%%
-fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 8))
-
-plt.subplot(2,3,1)
-plt.imshow(phantom_2Dt[tindex[0],:,:],vmin=0, vmax=1)
-plt.axis('off')
-plt.title('Time {}'.format(tindex[0]))
-
-plt.subplot(2,3,2)
-plt.imshow(phantom_2Dt[tindex[1],:,:],vmin=0, vmax=1)
-plt.axis('off')
-plt.title('Time {}'.format(tindex[1]))
-
-plt.subplot(2,3,3)
-plt.imshow(phantom_2Dt[tindex[2],:,:],vmin=0, vmax=1)
-plt.axis('off')
-plt.title('Time {}'.format(tindex[2]))
-
-
-plt.subplot(2,3,4)
-plt.imshow(pdhg.get_output().as_array()[tindex[0],:,:])
-plt.axis('off')
-plt.subplot(2,3,5)
-plt.imshow(pdhg.get_output().as_array()[tindex[1],:,:])
-plt.axis('off')
-plt.subplot(2,3,6)
-plt.imshow(pdhg.get_output().as_array()[tindex[2],:,:])
-plt.axis('off')
-im = plt.imshow(pdhg.get_output().as_array()[tindex[0],:,:])
-
-
-fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
- wspace=0.02, hspace=0.02)
-
-cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
-cbar = fig.colorbar(im, cax=cb_ax)
-
-
-plt.show()
-
diff --git a/Wrappers/Python/wip/Demos/PDHG_Tikhonov_Tomo2D.py b/Wrappers/Python/wip/Demos/PDHG_Tikhonov_Tomo2D.py
deleted file mode 100644
index f17c4fe..0000000
--- a/Wrappers/Python/wip/Demos/PDHG_Tikhonov_Tomo2D.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, AcquisitionData
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG
-
-from ccpi.optimisation.operators import BlockOperator, Gradient
-from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, BlockFunction
-from skimage.util import random_noise
-from ccpi.astra.ops import AstraProjectorSimple
-
-# Create phantom for Tikhonov-regularised 2D tomography
-N = 75
-x = np.zeros((N,N))
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-data = ImageData(x)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-
-detectors = N
-angles = np.linspace(0, np.pi, N, dtype=np.float32)
-
-ag = AcquisitionGeometry('parallel','2D',angles, detectors)
-Aop = AstraProjectorSimple(ig, ag, 'gpu')
-sin = Aop.direct(data)
-
-# Create noisy data. Apply Gaussian noise
-
-np.random.seed(10)
-noisy_data = sin + AcquisitionData(np.random.normal(0, 3, sin.shape))
-
-# Regularisation Parameter
-alpha = 500
-
-# Create operators
-op1 = Gradient(ig)
-op2 = Aop
-
-# Create BlockOperator
-operator = BlockOperator(op1, op2, shape=(2,1) )
-
-# Create functions
-
-f1 = alpha * L2NormSquared()
-f2 = 0.5 * L2NormSquared(b=noisy_data)
-f = BlockFunction(f1, f2)
-
-g = ZeroFunction()
-
-# Compute operator Norm
-normK = operator.norm()
-
-# Primal & dual stepsizes
-sigma = 1
-tau = 1/(sigma*normK**2)
-
-
-# Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 5000
-pdhg.update_objective_interval = 50
-pdhg.run(2000)
-
-#%%
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(data.as_array())
-plt.title('Ground Truth')
-plt.colorbar()
-plt.subplot(3,1,2)
-plt.imshow(noisy_data.as_array())
-plt.title('Noisy Data')
-plt.colorbar()
-plt.subplot(3,1,3)
-plt.imshow(pdhg.get_output().as_array())
-plt.title('Tikhonov Reconstruction')
-plt.colorbar()
-plt.show()
-##
-plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth')
-plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'Tikhonov reconstruction')
-plt.legend()
-plt.title('Middle Line Profiles')
-plt.show()
-
-
diff --git a/Wrappers/Python/wip/Demos/PDHG_vs_CGLS.py b/Wrappers/Python/wip/Demos/PDHG_vs_CGLS.py
deleted file mode 100644
index 3155654..0000000
--- a/Wrappers/Python/wip/Demos/PDHG_vs_CGLS.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, AcquisitionData
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG, CGLS
-
-from ccpi.optimisation.operators import Gradient
-from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, FunctionOperatorComposition
-from skimage.util import random_noise
-from ccpi.astra.ops import AstraProjectorSimple
-
-#%%
-
-N = 128
-x = np.zeros((N,N))
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-data = ImageData(x)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-
-detectors = N
-angles = np.linspace(0, np.pi, N, dtype=np.float32)
-
-ag = AcquisitionGeometry('parallel','2D',angles, detectors)
-Aop = AstraProjectorSimple(ig, ag, 'cpu')
-sin = Aop.direct(data)
-
-noisy_data = sin
-
-x_init = ig.allocate()
-
-## Setup and run the CGLS algorithm
-cgls = CGLS(x_init=x_init, operator=Aop, data=noisy_data)
-cgls.max_iteration = 500
-cgls.update_objective_interval = 50
-cgls.run(500, verbose=True)
-
-# Create BlockOperator
-operator = Aop
-f = 0.5 * L2NormSquared(b = noisy_data)
-g = ZeroFunction()
-
-## Compute operator Norm
-normK = operator.norm()
-
-## Primal & dual stepsizes
-sigma = 0.1
-tau = 1/(sigma*normK**2)
-#
-#
-## Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 2000
-pdhg.update_objective_interval = 50
-pdhg.run(2000)
-
-#%%
-
-diff = pdhg.get_output() - cgls.get_output()
-print( diff.norm())
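-# Both solvers address the same least-squares problem (PDHG via its
-# primal-dual formulation), so this difference norm should tend to zero
-# as both runs converge.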
-#
-plt.figure(figsize=(15,15))
-plt.subplot(3,1,1)
-plt.imshow(pdhg.get_output().as_array())
-plt.title('PDHG reconstruction')
-plt.colorbar()
-plt.subplot(3,1,2)
-plt.imshow(cgls.get_output().as_array())
-plt.title('CGLS reconstruction')
-plt.colorbar()
-plt.subplot(3,1,3)
-plt.imshow(diff.abs().as_array())
-plt.title('Difference reconstruction')
-plt.colorbar()
-plt.show()
-
diff --git a/Wrappers/Python/wip/Demos/check_blockOperator_sum_row_cols.py b/Wrappers/Python/wip/Demos/check_blockOperator_sum_row_cols.py
deleted file mode 100644
index bdb2c38..0000000
--- a/Wrappers/Python/wip/Demos/check_blockOperator_sum_row_cols.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri May 3 13:10:09 2019
-
-@author: evangelos
-"""
-
-from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff, BlockOperator, Gradient, Identity
-from ccpi.framework import ImageGeometry, AcquisitionGeometry, BlockDataContainer, ImageData
-from ccpi.astra.ops import AstraProjectorSimple
-
-from scipy import sparse
-import numpy as np
-
-N = 3
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-u = ig.allocate('random_int')
-
-# Compare FiniteDiff with SparseFiniteDiff
-
-DY = FiniteDiff(ig, direction = 0, bnd_cond = 'Neumann')
-DX = FiniteDiff(ig, direction = 1, bnd_cond = 'Neumann')
-
-DXu = DX.direct(u)
-DYu = DY.direct(u)
-
-DX_sparse = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
-DY_sparse = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
-
-DXu_sparse = DX_sparse.direct(u)
-DYu_sparse = DY_sparse.direct(u)
-
-#np.testing.assert_array_almost_equal(DYu.as_array(), DYu_sparse.as_array(), decimal=4)
-#np.testing.assert_array_almost_equal(DXu.as_array(), DXu_sparse.as_array(), decimal=4)
-
-#%% Tau/ Sigma
-
-A1 = DY_sparse.matrix()
-A2 = DX_sparse.matrix()
-A3 = sparse.eye(np.prod(ig.shape))
-
-sum_rows1 = np.array(np.sum(abs(A1), axis=1))
-sum_rows2 = np.array(np.sum(abs(A2), axis=1))
-sum_rows3 = np.array(np.sum(abs(A3), axis=1))
-
-sum_cols1 = np.array(np.sum(abs(A1), axis=0))
-sum_cols2 = np.array(np.sum(abs(A2), axis=0))
-sum_cols3 = np.array(np.sum(abs(A3), axis=0))
-
-# Check if Grad sum row/cols is OK
-Grad = Gradient(ig)
-
-Sum_Block_row = Grad.sum_abs_row()
-Sum_Block_col = Grad.sum_abs_col()
-
-tmp1 = BlockDataContainer( ImageData(np.reshape(sum_rows1, ig.shape, order='F')),\
- ImageData(np.reshape(sum_rows2, ig.shape, order='F')))
-
-
-#np.testing.assert_array_almost_equal(tmp1[0].as_array(), Sum_Block_row[0].as_array(), decimal=4)
-#np.testing.assert_array_almost_equal(tmp1[1].as_array(), Sum_Block_row[1].as_array(), decimal=4)
-
-tmp2 = ImageData(np.reshape(sum_cols1 + sum_cols2, ig.shape, order='F'))
-
-#np.testing.assert_array_almost_equal(tmp2.as_array(), Sum_Block_col.as_array(), decimal=4)
-
-
-#%% BlockOperator with Gradient, Identity
-
-Id = Identity(ig)
-Block_GrId = BlockOperator(Grad, Id, shape=(2,1))
-
-
-Sum_Block_GrId_row = Block_GrId.sum_abs_row()
-
diff --git a/Wrappers/Python/wip/Demos/check_precond.py b/Wrappers/Python/wip/Demos/check_precond.py
deleted file mode 100644
index 8cf95fa..0000000
--- a/Wrappers/Python/wip/Demos/check_precond.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry
-
-import numpy as np
-import numpy
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG
-
-from ccpi.optimisation.operators import BlockOperator, Gradient
-from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
- MixedL21Norm, BlockFunction
-
-from ccpi.astra.ops import AstraProjectorSimple
-
-# Create phantom for TV 2D tomography
-N = 75
-x = np.zeros((N,N))
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-data = ImageData(x)
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-
-detectors = N
-angles = np.linspace(0, np.pi, N, dtype=np.float32)
-
-ag = AcquisitionGeometry('parallel','2D',angles, detectors)
-Aop = AstraProjectorSimple(ig, ag, 'cpu')
-sin = Aop.direct(data)
-
-# Create noisy data
-np.random.seed(10)
-n1 = np.random.random(sin.shape)
-noisy_data = sin + ImageData(5*n1)
-
-#%%
-
-# Regularisation Parameter
-alpha = 50
-
-# Create operators
-op1 = Gradient(ig)
-op2 = Aop
-
-# Create BlockOperator
-operator = BlockOperator(op1, op2, shape=(2,1) )
-
-
-
-# Create functions
-
-f1 = alpha * MixedL21Norm()
-f2 = L2NormSquared(b=noisy_data)
-f = BlockFunction(f1, f2)
-
-g = ZeroFunction()
-
-diag_precon = True
-
-if diag_precon:
-
- def tau_sigma_precond(operator):
-
- tau = 1/operator.sum_abs_row()
- sigma = 1/ operator.sum_abs_col()
-
- return tau, sigma
-
- tau, sigma = tau_sigma_precond(operator)
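- # The element-wise choices above mirror the diagonal preconditioning of
- # Pock & Chambolle (ICCV 2011), which builds per-element step sizes from
- # the absolute row and column sums of the operator instead of its norm.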
-
-else:
- # Compute operator Norm
- normK = operator.norm()
- # Primal & dual stepsizes
- sigma = 10
- tau = 1/(sigma*normK**2)
-
-
-# Setup and run the PDHG algorithm
-pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-pdhg.max_iteration = 1000
-pdhg.update_objective_interval = 200
-pdhg.run(1000)
-
-#%% Check with CVX solution
-
-from ccpi.optimisation.operators import SparseFiniteDiff
-import astra
-import numpy
-
-try:
- from cvxpy import *
- cvx_available = True
-except ImportError:
- cvx_available = False
-
-
-if cvx_available:
-
- ##Construct problem
- u = Variable(N*N)
-
- DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
- DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
-
- regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
-
- # create matrix representation for Astra operator
-
- vol_geom = astra.create_vol_geom(N, N)
- proj_geom = astra.create_proj_geom('parallel', 1.0, detectors, angles)
-
- proj_id = astra.create_projector('line', proj_geom, vol_geom)
-
- matrix_id = astra.projector.matrix(proj_id)
-
- ProjMat = astra.matrix.get(matrix_id)
-
- fidelity = sum_squares( ProjMat * u - noisy_data.as_array().ravel())
- #constraints = [q>=fidelity]
-# constraints = [u>=0]
-
- solver = MOSEK
- obj = Minimize( regulariser + fidelity)
- prob = Problem(obj)
- result = prob.solve(verbose = True, solver = solver)
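- # MOSEK is a commercial solver and needs a separate licence; if it is not
- # available, one of cvxpy's open solvers (e.g. SCS) could be used instead.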
-
-
-#%%
-
-plt.figure(figsize=(15,15))
-plt.subplot(2,2,1)
-plt.imshow(data.as_array())
-plt.title('Ground Truth')
-
-plt.subplot(2,2,2)
-plt.imshow(noisy_data.as_array())
-plt.title('Noisy Data')
-
-plt.subplot(2,2,3)
-plt.imshow(pdhg.get_output().as_array())
-plt.title('PDHG Reconstruction')
-
-plt.subplot(2,2,4)
-plt.imshow(np.reshape(u.value, ig.shape))
-plt.title('CVX Reconstruction')
-
-plt.show()
-
-#%%
-plt.plot(np.linspace(0,N,N), pdhg.get_output().as_array()[int(N/2),:], label = 'PDHG')
-plt.plot(np.linspace(0,N,N), np.reshape(u.value, ig.shape)[int(N/2),:], label = 'CVX')
-plt.legend()
-plt.title('Middle Line Profiles')
-plt.show()
-
-
-
-
-
-
-
-
diff --git a/Wrappers/Python/wip/compare_CGLS_algos.py b/Wrappers/Python/wip/compare_CGLS_algos.py
deleted file mode 100644
index 52f3f31..0000000
--- a/Wrappers/Python/wip/compare_CGLS_algos.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# This demo compares the function-based CGLS implementation with the CGLS
-# algorithm class. The ASTRA 2D projectors are used.
-
-# First make all imports
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, \
- AcquisitionData
-from ccpi.optimisation.algs import FISTA, FBPD, CGLS, SIRT
-from ccpi.astra.operators import AstraProjectorSimple
-
-from ccpi.optimisation.algorithms import CGLS as CGLSalg
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.functions import Norm2Sq
-
-# Choose either a parallel-beam (1=parallel2D) or fan-beam (2=cone2D) test case
-test_case = 1
-
-# Set up phantom size NxN by creating ImageGeometry, initialising the
-# ImageData object with this geometry and empty array and finally put some
-# data into its array, and display as image.
-N = 128
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-#plt.figure()
-#plt.imshow(x)
-#plt.title('Phantom image')
-#plt.show()
-
-# Set up AcquisitionGeometry object to hold the parameters of the measurement
-# setup geometry: number of angles, the actual angles from 0 to
-# pi for parallel beam and 0 to 2pi for fanbeam, set the width of a detector
-# pixel relative to an object pixel, the number of detector pixels, and the
-# source-origin and origin-detector distance (here the origin-detector distance
-# set to 0 to simulate a "virtual detector" with same detector pixel size as
-# object pixel size).
-angles_num = 20
-det_w = 1.0
-det_num = N
-SourceOrig = 200
-OrigDetec = 0
-
-if test_case==1:
- angles = np.linspace(0,np.pi,angles_num,endpoint=False)
- ag = AcquisitionGeometry('parallel',
- '2D',
- angles,
- det_num,det_w)
-elif test_case==2:
- angles = np.linspace(0,2*np.pi,angles_num,endpoint=False)
- ag = AcquisitionGeometry('cone',
- '2D',
- angles,
- det_num,
- det_w,
- dist_source_center=SourceOrig,
- dist_center_detector=OrigDetec)
-else:
- raise NotImplementedError
-
-# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
-# wrapping calls to ASTRA as well as specifying whether to use CPU or GPU.
-Aop = AstraProjectorSimple(ig, ag, 'cpu')
-
-# Forward and backprojection are available as methods direct and adjoint. Here
-# generate test data b and do simple backprojection to obtain z.
-b = Aop.direct(Phantom)
-z = Aop.adjoint(b)
-
-#plt.figure()
-#plt.imshow(b.array)
-#plt.title('Simulated data')
-#plt.show()
-
-#plt.figure()
-#plt.imshow(z.array)
-#plt.title('Backprojected data')
-#plt.show()
-
-# Using the test data b, different reconstruction methods can now be set up as
-# demonstrated in the rest of this file. In general all methods need an initial
-# guess and some algorithm options to be set:
-x_init = ImageData(np.zeros(x.shape),geometry=ig)
-opt = {'tol': 1e-4, 'iter': 7}
-
-# First a CGLS reconstruction using the function version of CGLS can be done:
-x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, b, opt)
-
-#plt.figure()
-#plt.imshow(x_CGLS.array)
-#plt.title('CGLS')
-#plt.colorbar()
-#plt.show()
-
-#plt.figure()
-#plt.semilogy(criter_CGLS)
-#plt.title('CGLS criterion')
-#plt.show()
-
-f = Norm2Sq(Aop, b, c=1.)
-
-def callback(it, objective, solution):
- print (objective, f(solution))
-
-# Now CGLS using the algorithm class
-CGLS_alg = CGLSalg()
-CGLS_alg.set_up(x_init, Aop, b )
-CGLS_alg.max_iteration = 500
-CGLS_alg.update_objective_interval = 10
-CGLS_alg.run(300, callback=callback)
-x_CGLS_alg = CGLS_alg.get_output()
-
-plt.figure()
-plt.imshow(x_CGLS_alg.as_array())
-plt.title('CGLS ALG')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(CGLS_alg.objective)
-plt.title('CGLS criterion')
-plt.show()
-
-print(criter_CGLS)
-print(CGLS_alg.objective)
-
-print((x_CGLS - x_CGLS_alg).norm()) \ No newline at end of file
diff --git a/Wrappers/Python/wip/demo_SIRT.py b/Wrappers/Python/wip/demo_SIRT.py
deleted file mode 100644
index 5a85d41..0000000
--- a/Wrappers/Python/wip/demo_SIRT.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# This demo illustrates how to use the SIRT algorithm without and with
-# nonnegativity and box constraints. The ASTRA 2D projectors are used.
-
-# First make all imports
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, \
- AcquisitionData
-from ccpi.optimisation.functions import IndicatorBox
-from ccpi.astra.ops import AstraProjectorSimple
-from ccpi.optimisation.algorithms import SIRT, CGLS
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-# Choose either a parallel-beam (1=parallel2D) or fan-beam (2=cone2D) test case
-test_case = 1
-
-# Set up phantom size NxN by creating ImageGeometry, initialising the
-# ImageData object with this geometry and empty array and finally put some
-# data into its array, and display as image.
-N = 128
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.figure()
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Set up AcquisitionGeometry object to hold the parameters of the measurement
-# setup geometry: number of angles, the actual angles from 0 to
-# pi for parallel beam and 0 to 2pi for fanbeam, set the width of a detector
-# pixel relative to an object pixel, the number of detector pixels, and the
-# source-origin and origin-detector distance (here the origin-detector distance
-# set to 0 to simulate a "virtual detector" with same detector pixel size as
-# object pixel size).
-angles_num = 20
-det_w = 1.0
-det_num = N
-SourceOrig = 200
-OrigDetec = 0
-
-if test_case==1:
- angles = np.linspace(0,np.pi,angles_num,endpoint=False)
- ag = AcquisitionGeometry('parallel',
- '2D',
- angles,
- det_num,det_w)
-elif test_case==2:
- angles = np.linspace(0,2*np.pi,angles_num,endpoint=False)
- ag = AcquisitionGeometry('cone',
- '2D',
- angles,
- det_num,
- det_w,
- dist_source_center=SourceOrig,
- dist_center_detector=OrigDetec)
-else:
- raise NotImplementedError
-
-# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
-# wrapping calls to ASTRA as well as specifying whether to use CPU or GPU.
-Aop = AstraProjectorSimple(ig, ag, 'gpu')
-
-# Forward and backprojection are available as methods direct and adjoint. Here
-# generate test data b and do simple backprojection to obtain z.
-b = Aop.direct(Phantom)
-z = Aop.adjoint(b)
-
-plt.figure()
-plt.imshow(b.as_array())
-plt.title('Simulated data')
-plt.show()
-
-plt.figure()
-plt.imshow(z.as_array())
-plt.title('Backprojected data')
-plt.show()
-
-# Using the test data b, different reconstruction methods can now be set up as
-# demonstrated in the rest of this file. In general all methods need an initial
-# guess and some algorithm options to be set:
-x_init = ImageData(np.zeros(x.shape),geometry=ig)
-opt = {'tol': 1e-4, 'iter': 100}
-
-
-# First run a simple CGLS reconstruction:
-CGLS_alg = CGLS()
-CGLS_alg.set_up(x_init, Aop, b )
-CGLS_alg.max_iteration = 2000
-CGLS_alg.run(opt['iter'])
-x_CGLS_alg = CGLS_alg.get_output()
-
-plt.figure()
-plt.imshow(x_CGLS_alg.as_array())
-plt.title('CGLS ALG')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(CGLS_alg.objective)
-plt.title('CGLS criterion')
-plt.show()
-
-
-# A SIRT reconstruction can be done simply by replacing CGLS by SIRT.
-# In the first instance, no constraints are enforced.
-SIRT_alg = SIRT()
-SIRT_alg.set_up(x_init, Aop, b )
-SIRT_alg.max_iteration = 2000
-SIRT_alg.run(opt['iter'])
-x_SIRT_alg = SIRT_alg.get_output()
-
-plt.figure()
-plt.imshow(x_SIRT_alg.as_array())
-plt.title('SIRT unconstrained')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(SIRT_alg.objective)
-plt.title('SIRT unconstrained criterion')
-plt.show()
-
-# The SIRT algorithm is stopped after the specified number of iterations has
-# been run. It can be resumed by calling the run command again, which will run
-# it for the specified number of iterations.
-SIRT_alg.run(opt['iter'])
-x_SIRT_alg2 = SIRT_alg.get_output()
-
-plt.figure()
-plt.imshow(x_SIRT_alg2.as_array())
-plt.title('SIRT unconstrained, extra iterations')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(SIRT_alg.objective)
-plt.title('SIRT unconstrained criterion, extra iterations')
-plt.show()
-
-
-# A SIRT nonnegativity constrained reconstruction can be done using the
-# additional input "constraint" set to a box indicator function with 0 as the
-# lower bound and the default upper bound of infinity. First setup a new
-# instance of the SIRT algorithm.
-SIRT_alg0 = SIRT()
-SIRT_alg0.set_up(x_init, Aop, b, constraint=IndicatorBox(lower=0) )
-SIRT_alg0.max_iteration = 2000
-SIRT_alg0.run(opt['iter'])
-x_SIRT_alg0 = SIRT_alg0.get_output()
-
-plt.figure()
-plt.imshow(x_SIRT_alg0.as_array())
-plt.title('SIRT nonnegativity constrained')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(SIRT_alg0.objective)
-plt.title('SIRT nonnegativity criterion')
-plt.show()
-
-
-# A SIRT reconstruction with box constraints on [0,1] can also be done.
-SIRT_alg01 = SIRT()
-SIRT_alg01.set_up(x_init, Aop, b, constraint=IndicatorBox(lower=0,upper=1) )
-SIRT_alg01.max_iteration = 2000
-SIRT_alg01.run(opt['iter'])
-x_SIRT_alg01 = SIRT_alg01.get_output()
-
-plt.figure()
-plt.imshow(x_SIRT_alg01.as_array())
-plt.title('SIRT box(0,1)')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(SIRT_alg01.objective)
-plt.title('SIRT box(0,1) criterion')
-plt.show()
-
-# The test image has values in the range [0,1], so enforcing values in the
-# reconstruction to be within this interval improves a lot. Just for fun
-# we can also easily see what happens if we choose a narrower interval as
-# constraint in the reconstruction, lower bound 0.2, upper bound 0.8.
-SIRT_alg0208 = SIRT()
-SIRT_alg0208.set_up(x_init,Aop,b,constraint=IndicatorBox(lower=0.2,upper=0.8))
-SIRT_alg0208.max_iteration = 2000
-SIRT_alg0208.run(opt['iter'])
-x_SIRT_alg0208 = SIRT_alg0208.get_output()
-
-plt.figure()
-plt.imshow(x_SIRT_alg0208.as_array())
-plt.title('SIRT box(0.2,0.8)')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(SIRT_alg0208.objective)
-plt.title('SIRT box(0.2,0.8) criterion')
-plt.show() \ No newline at end of file
diff --git a/Wrappers/Python/wip/demo_box_constraints_FISTA.py b/Wrappers/Python/wip/demo_box_constraints_FISTA.py
deleted file mode 100644
index b15dd45..0000000
--- a/Wrappers/Python/wip/demo_box_constraints_FISTA.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Apr 17 14:46:21 2019
-
-@author: jakob
-
-Demonstrate the use of box constraints in FISTA
-"""
-
-# First make all imports
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, \
- AcquisitionData
-from ccpi.optimisation.algorithms import FISTA
-from ccpi.optimisation.functions import Norm2sq, IndicatorBox
-from ccpi.astra.ops import AstraProjectorSimple
-
-from ccpi.optimisation.operators import Identity
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-
-# Set up phantom size NxN by creating ImageGeometry, initialising the
-# ImageData object with this geometry and empty array and finally put some
-# data into its array, and display as image.
-N = 128
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.figure()
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Set up AcquisitionGeometry object to hold the parameters of the measurement
-# setup geometry: number of angles, the actual angles from 0 to
-# pi for parallel beam and 0 to 2pi for fanbeam, set the width of a detector
-# pixel relative to an object pixel, the number of detector pixels, and the
-# source-origin and origin-detector distance (here the origin-detector distance
-# set to 0 to simulate a "virtual detector" with same detector pixel size as
-# object pixel size).
-angles_num = 20
-det_w = 1.0
-det_num = N
-SourceOrig = 200
-OrigDetec = 0
-
-test_case = 1
-
-if test_case==1:
- angles = np.linspace(0,np.pi,angles_num,endpoint=False)
- ag = AcquisitionGeometry('parallel',
- '2D',
- angles,
- det_num,det_w)
-elif test_case==2:
- angles = np.linspace(0,2*np.pi,angles_num,endpoint=False)
- ag = AcquisitionGeometry('cone',
- '2D',
- angles,
- det_num,
- det_w,
- dist_source_center=SourceOrig,
- dist_center_detector=OrigDetec)
-else:
- raise NotImplementedError
-
-# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
-# wrapping calls to ASTRA as well as specifying whether to use CPU or GPU.
-Aop = AstraProjectorSimple(ig, ag, 'cpu')
-
-Aop = Identity(ig,ig)
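-# Note: the line above replaces the ASTRA projector with the identity, so the
-# remainder of the script effectively runs a denoising test; comment it out to
-# exercise the tomography operator instead.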
-
-# Forward and backprojection are available as methods direct and adjoint. Here
-# generate test data b and do simple backprojection to obtain z.
-b = Aop.direct(Phantom)
-z = Aop.adjoint(b)
-
-plt.figure()
-plt.imshow(b.array)
-plt.title('Simulated data')
-plt.show()
-
-plt.figure()
-plt.imshow(z.array)
-plt.title('Backprojected data')
-plt.show()
-
-# Using the test data b, different reconstruction methods can now be set up as
-# demonstrated in the rest of this file. In general all methods need an initial
-# guess and some algorithm options to be set:
-x_init = ImageData(np.zeros(x.shape),geometry=ig)
-opt = {'tol': 1e-4, 'iter': 100}
-
-
-
-# Create least squares object instance with projector, test data and a constant
-# coefficient of 0.5:
-f = Norm2sq(Aop,b,c=0.5)
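-# With c=0.5 this is f(x) = 0.5*||Aop x - b||_2^2, whose gradient is
-# Aop^T (Aop x - b); FISTA only needs this gradient and its Lipschitz constant.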
-
-# Run FISTA for least squares without constraints
-FISTA_alg = FISTA()
-FISTA_alg.set_up(x_init=x_init, f=f, opt=opt)
-FISTA_alg.max_iteration = 2000
-FISTA_alg.run(opt['iter'])
-x_FISTA = FISTA_alg.get_output()
-
-plt.figure()
-plt.imshow(x_FISTA.array)
-plt.title('FISTA unconstrained')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(FISTA_alg.objective)
-plt.title('FISTA unconstrained criterion')
-plt.show()
-
-# Run FISTA for least squares with lower bound 0.1
-FISTA_alg0 = FISTA()
-FISTA_alg0.set_up(x_init=x_init, f=f, g=IndicatorBox(lower=0.1), opt=opt)
-FISTA_alg0.max_iteration = 2000
-FISTA_alg0.run(opt['iter'])
-x_FISTA0 = FISTA_alg0.get_output()
-
-plt.figure()
-plt.imshow(x_FISTA0.array)
-plt.title('FISTA lower bound 0.1')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(FISTA_alg0.objective)
-plt.title('FISTA criterion, lower bound 0.1')
-plt.show()
-
-# Run FISTA for least squares with box constraint [0.1,0.8]
-FISTA_alg0 = FISTA()
-FISTA_alg0.set_up(x_init=x_init, f=f, g=IndicatorBox(lower=0.1,upper=0.8), opt=opt)
-FISTA_alg0.max_iteration = 2000
-FISTA_alg0.run(opt['iter'])
-x_FISTA0 = FISTA_alg0.get_output()
-
-plt.figure()
-plt.imshow(x_FISTA0.array)
-plt.title('FISTA box(0.1,0.8) constrained')
-plt.colorbar()
-plt.show()
-
-plt.figure()
-plt.semilogy(FISTA_alg0.objective)
-plt.title('FISTA criterion, box(0.1,0.8) constrained criterion')
-plt.show() \ No newline at end of file
diff --git a/Wrappers/Python/wip/demo_colourbay.py b/Wrappers/Python/wip/demo_colourbay.py
deleted file mode 100644
index 0536b07..0000000
--- a/Wrappers/Python/wip/demo_colourbay.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# This script demonstrates how to load a mat-file with UoM colour-bay data
-# into the CIL optimisation framework and run (simple) multichannel
-# reconstruction methods.
-
-# All third-party imports.
-import numpy
-from scipy.io import loadmat
-import matplotlib.pyplot as plt
-
-# All own imports.
-from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData
-from ccpi.astra.ops import AstraProjectorMC
-from ccpi.optimisation.algs import CGLS, FISTA
-from ccpi.optimisation.funcs import Norm2sq, Norm1
-
-# Load full data and permute to expected ordering. Change path as necessary.
-# The loaded X has dims 80x60x80x150, which is pix x angle x pix x channel.
-# Permute (numpy.transpose) puts into our default ordering which is
-# (channel, angle, vertical, horizontal).
-
-pathname = '/media/newhd/shared/Data/ColourBay/spectral_data_sets/CarbonPd/'
-filename = 'carbonPd_full_sinogram_stripes_removed.mat'
-
-X = loadmat(pathname + filename)
-X = numpy.transpose(X['SS'],(3,1,2,0))
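-# After the transpose, X has shape (150, 60, 80, 80), i.e.
-# (channel, angle, vertical, horizontal) as described above.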
-
-# Store geometric variables for reuse
-num_channels = X.shape[0]
-num_pixels_h = X.shape[3]
-num_pixels_v = X.shape[2]
-num_angles = X.shape[1]
-
-# Display a single projection in a single channel
-plt.imshow(X[100,5,:,:])
-plt.title('Example of a projection image in one channel' )
-plt.show()
-
-# Set angles to use
-angles = numpy.linspace(-numpy.pi,numpy.pi,num_angles,endpoint=False)
-
-# Define full 3D acquisition geometry and data container.
-# Geometric info is taken from the txt-file in the same dir as the mat-file
-ag = AcquisitionGeometry('cone',
- '3D',
- angles,
- pixel_num_h=num_pixels_h,
- pixel_size_h=0.25,
- pixel_num_v=num_pixels_v,
- pixel_size_v=0.25,
- dist_source_center=233.0,
- dist_center_detector=245.0,
- channels=num_channels)
-data = AcquisitionData(X, geometry=ag)
-
-# Reduce to central slice by extracting relevant parameters from data and its
-# geometry. Perhaps create function to extract central slice automatically?
-data2d = data.subset(vertical=40)
-ag2d = AcquisitionGeometry('cone',
- '2D',
- ag.angles,
- pixel_num_h=ag.pixel_num_h,
- pixel_size_h=ag.pixel_size_h,
- pixel_num_v=1,
- pixel_size_v=ag.pixel_size_h,
- dist_source_center=ag.dist_source_center,
- dist_center_detector=ag.dist_center_detector,
- channels=ag.channels)
-data2d.geometry = ag2d
-
-# Set up 2D Image Geometry.
-# First need the geometric magnification to scale the voxel size relative
-# to the detector pixel size.
-mag = (ag.dist_source_center + ag.dist_center_detector)/ag.dist_source_center
-ig2d = ImageGeometry(voxel_num_x=ag2d.pixel_num_h,
- voxel_num_y=ag2d.pixel_num_h,
- voxel_size_x=ag2d.pixel_size_h/mag,
- voxel_size_y=ag2d.pixel_size_h/mag,
- channels=X.shape[0])
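-# With the distances above, mag = (233.0 + 245.0)/233.0 ~ 2.05, so each
-# reconstructed voxel is roughly 0.25/2.05 ~ 0.12 in detector-pixel units.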
-
-# Create GPU multichannel projector/backprojector operator with ASTRA.
-Aall = AstraProjectorMC(ig2d,ag2d,'gpu')
-
-# Compute a simple backprojection and display one channel as image.
-Xbp = Aall.adjoint(data2d)
-plt.imshow(Xbp.subset(channel=100).array)
-plt.show()
-
-# Set initial guess ImageData with zeros for algorithms, and algorithm options.
-x_init = ImageData(numpy.zeros((num_channels,num_pixels_v,num_pixels_h)),
- geometry=ig2d,
- dimension_labels=['channel','horizontal_y','horizontal_x'])
-opt_CGLS = {'tol': 1e-4, 'iter': 5}
-
-# Run CGLS algorithm and display one channel.
-x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aall, data2d, opt_CGLS)
-
-plt.imshow(x_CGLS.subset(channel=100).array)
-plt.title('CGLS')
-plt.show()
-
-plt.semilogy(criter_CGLS)
-plt.title('CGLS Criterion vs iterations')
-plt.show()
-
-# Create least squares object instance with projector, test data and a constant
-# coefficient of 0.5. Note it is least squares over all channels.
-f = Norm2sq(Aall,data2d,c=0.5)
-
-# Options for FISTA algorithm.
-opt = {'tol': 1e-4, 'iter': 100}
-
-# Run FISTA for least squares without regularization and display one channel
-# reconstruction as image.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, None, opt)
-
-plt.imshow(x_fista0.subset(channel=100).array)
-plt.title('FISTA LS')
-plt.show()
-
-plt.semilogy(criter0)
-plt.title('FISTA LS Criterion vs iterations')
-plt.show()
-
-# Set up 1-norm regularisation (over all channels), solve with FISTA, and
-# display one channel of reconstruction.
-lam = 0.1
-g0 = Norm1(lam)
-
-x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g0, opt)
-
-plt.imshow(x_fista1.subset(channel=100).array)
-plt.title('FISTA LS+1')
-plt.show()
-
-plt.semilogy(criter1)
-plt.title('FISTA LS+1 Criterion vs iterations')
-plt.show() \ No newline at end of file
diff --git a/Wrappers/Python/wip/demo_compare_cvx.py b/Wrappers/Python/wip/demo_compare_cvx.py
deleted file mode 100644
index 27b1c97..0000000
--- a/Wrappers/Python/wip/demo_compare_cvx.py
+++ /dev/null
@@ -1,306 +0,0 @@
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
-from ccpi.optimisation.algs import FISTA, FBPD, CGLS
-from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D, Norm2
-
-from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity
-from ccpi.optimisation.ops import Identity
-from ccpi.optimisation.ops import FiniteDiff2D
-
-# Requires CVXPY, see http://www.cvxpy.org/
-# CVXPY can be installed in anaconda using
-# conda install -c cvxgrp cvxpy libgcc
-
-# Whether to use or omit CVXPY
-use_cvxpy = True
-if use_cvxpy:
- from cvxpy import *
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-# Problem data.
-m = 30
-n = 20
-np.random.seed(1)
-Amat = np.random.randn(m, n)
-A = LinearOperatorMatrix(Amat)
-bmat = np.random.randn(m)
-bmat.shape = (bmat.shape[0],1)
-
-# A = Identity()
-# Change n to equal to m.
-
-b = DataContainer(bmat)
-
-# Regularization parameter
-lam = 10
-opt = {'memopt':True}
-# Create object instances with the test data A and b.
-f = Norm2sq(A,b,c=0.5, memopt=True)
-g0 = ZeroFun()
-
-# Initial guess
-x_init = DataContainer(np.zeros((n,1)))
-
-f.grad(x_init)
-
-# Run FISTA for least squares plus zero function.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt)
-
-# Print solution and final objective/criterion value for comparison
-print("FISTA least squares plus zero function solution and objective value:")
-print(x_fista0.array)
-print(criter0[-1])
-
-if use_cvxpy:
- # Compare to CVXPY
-
- # Construct the problem.
- x0 = Variable(n)
- objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) )
- prob0 = Problem(objective0)
-
- # The optimal objective is returned by prob.solve().
- result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus zero function solution and objective value:")
- print(x0.value)
- print(objective0.value)
-
-# Plot criterion curve to see FISTA converge to same value as CVX.
-iternum = np.arange(1,1001)
-plt.figure()
-plt.loglog(iternum[[0,-1]],[objective0.value, objective0.value], label='CVX LS')
-plt.loglog(iternum,criter0,label='FISTA LS')
-plt.legend()
-plt.show()
-
-# Create 1-norm object instance
-g1 = Norm1(lam)
-
-g1(x_init)
-x_rand = DataContainer(np.reshape(np.random.rand(n),(n,1)))
-x_rand2 = DataContainer(np.reshape(np.random.rand(n-1),(n-1,1)))
-v = g1.prox(x_rand,0.02)
-#vv = g1.prox(x_rand2,0.02)
-vv = v.copy()
-vv *= 0
-print (">>>>>>>>>>vv" , vv.as_array())
-vv.fill(v)
-print (">>>>>>>>>>fill" , vv.as_array())
-g1.proximal(x_rand, 0.02, out=vv)
-print (">>>>>>>>>>v" , v.as_array())
-print (">>>>>>>>>>gradient" , vv.as_array())
-
-print (">>>>>>>>>>" , (v-vv).as_array())
-import sys
-#sys.exit(0)
-# Combine with least squares and solve using generic FISTA implementation
-x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt)
-
-# Print for comparison
-print("FISTA least squares plus 1-norm solution and objective value:")
-print(x_fista1)
-print(criter1[-1])
-
-if use_cvxpy:
- # Compare to CVXPY
-
- # Construct the problem.
- x1 = Variable(n)
- objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) )
- prob1 = Problem(objective1)
-
- # The optimal objective is returned by prob.solve().
- result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus 1-norm solution and objective value:")
- print(x1.value)
- print(objective1.value)
-
-# Now try another algorithm, FBPD, for the same problem:
-x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,Identity(), None, f, g1)
-print(x_fbpd1)
-print(criterfbpd1[-1])
-
-# Plot criterion curve to see both FISTA and FBPD converge to same value.
-# Note that FISTA is very efficient for 1-norm minimization so it beats
-# FBPD in this test by a lot. But FBPD can handle a larger class of problems
-# than FISTA can.
-plt.figure()
-plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
-plt.loglog(iternum,criter1,label='FISTA LS+1')
-plt.legend()
-plt.show()
-
-plt.figure()
-plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
-plt.loglog(iternum,criter1,label='FISTA LS+1')
-plt.loglog(iternum,criterfbpd1,label='FBPD LS+1')
-plt.legend()
-plt.show()
-
-# Now try 1-norm and TV denoising with FBPD, first 1-norm.
-
-# Set up phantom size NxN by creating ImageGeometry, initialising the
-# ImageData object with this geometry and empty array and finally put some
-# data into its array, and display as image.
-N = 64
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Identity operator for denoising
-I = TomoIdentity(ig)
-
-# Data and add noise
-y = I.direct(Phantom)
-y.array = y.array + 0.1*np.random.randn(N, N)
-
-plt.imshow(y.array)
-plt.title('Noisy image')
-plt.show()
-
-
-###################
-# Data fidelity term
-f_denoise = Norm2sq(I,y,c=0.5,memopt=True)
-
-# 1-norm regulariser
-lam1_denoise = 1.0
-g1_denoise = Norm1(lam1_denoise)
-
-# Initial guess
-x_init_denoise = ImageData(np.zeros((N,N)))
-
-# Combine with least squares and solve using generic FISTA implementation
-x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
-
-print(x_fista1_denoise)
-print(criter1_denoise[-1])
-
-#plt.imshow(x_fista1_denoise.as_array())
-#plt.title('FISTA LS+1')
-#plt.show()
-
-# Now denoise LS + 1-norm with FBPD
-x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, \
- criterfbpd1_denoise = FBPD(x_init_denoise, I, None, f_denoise, g1_denoise)
-print(x_fbpd1_denoise)
-print(criterfbpd1_denoise[-1])
-
-#plt.imshow(x_fbpd1_denoise.as_array())
-#plt.title('FBPD LS+1')
-#plt.show()
-
-if use_cvxpy:
- # Compare to CVXPY
-
- # Construct the problem.
- x1_denoise = Variable(N**2,1)
- objective1_denoise = Minimize(0.5*sum_squares(x1_denoise - y.array.flatten()) + lam1_denoise*norm(x1_denoise,1) )
- prob1_denoise = Problem(objective1_denoise)
-
- # The optimal objective is returned by prob.solve().
- result1_denoise = prob1_denoise.solve(verbose=False,solver=SCS,eps=1e-12)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus 1-norm solution and objective value:")
- print(x1_denoise.value)
- print(objective1_denoise.value)
-
-x1_cvx = x1_denoise.value
-x1_cvx.shape = (N,N)
-
-
-
-#plt.imshow(x1_cvx)
-#plt.title('CVX LS+1')
-#plt.show()
-
-fig = plt.figure()
-plt.subplot(1,4,1)
-plt.imshow(y.array)
-plt.title("LS+1")
-plt.subplot(1,4,2)
-plt.imshow(x_fista1_denoise.as_array())
-plt.title("fista")
-plt.subplot(1,4,3)
-plt.imshow(x_fbpd1_denoise.as_array())
-plt.title("fbpd")
-plt.subplot(1,4,4)
-plt.imshow(x1_cvx)
-plt.title("cvx")
-plt.show()
-
-##############################################################
-# Now TV with FBPD and Norm2
-lam_tv = 0.1
-gtv = TV2D(lam_tv)
-norm2 = Norm2(lam_tv)
-op = FiniteDiff2D()
-#gtv(gtv.op.direct(x_init_denoise))
-
-opt_tv = {'tol': 1e-4, 'iter': 10000}
-
-x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, \
- criterfbpdtv_denoise = FBPD(x_init_denoise, op, None, \
- f_denoise, norm2 ,opt=opt_tv)
-print(x_fbpdtv_denoise)
-print(criterfbpdtv_denoise[-1])
-
-plt.imshow(x_fbpdtv_denoise.as_array())
-plt.title('FBPD TV')
-#plt.show()
-
-if use_cvxpy:
- # Compare to CVXPY
-
- # Construct the problem.
- xtv_denoise = Variable((N,N))
- #print (xtv_denoise.value.shape)
- objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) )
- probtv_denoise = Problem(objectivetv_denoise)
-
- # The optimal objective is returned by prob.solve().
- resulttv_denoise = probtv_denoise.solve(verbose=False,solver=SCS,eps=1e-12)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus TV solution and objective value:")
- print(xtv_denoise.value)
- print(objectivetv_denoise.value)
-
-plt.imshow(xtv_denoise.value)
-plt.title('CVX TV')
-#plt.show()
-
-fig = plt.figure()
-plt.subplot(1,3,1)
-plt.imshow(y.array)
-plt.title("TV2D")
-plt.subplot(1,3,2)
-plt.imshow(x_fbpdtv_denoise.as_array())
-plt.title("fbpd tv denoise")
-plt.subplot(1,3,3)
-plt.imshow(xtv_denoise.value)
-plt.title("CVX tv")
-plt.show()
-
-
-
-plt.loglog([0,opt_tv['iter']], [objectivetv_denoise.value,objectivetv_denoise.value], label='CVX TV')
-plt.loglog(criterfbpdtv_denoise, label='FBPD TV')
diff --git a/Wrappers/Python/wip/demo_gradient_descent.py b/Wrappers/Python/wip/demo_gradient_descent.py
deleted file mode 100755
index 4d6647e..0000000
--- a/Wrappers/Python/wip/demo_gradient_descent.py
+++ /dev/null
@@ -1,295 +0,0 @@
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
-from ccpi.optimisation.algs import FISTA, FBPD, CGLS
-from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D, Norm2
-
-from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity
-from ccpi.optimisation.ops import Identity
-from ccpi.optimisation.ops import FiniteDiff2D
-
-# Requires CVXPY, see http://www.cvxpy.org/
-# CVXPY can be installed in anaconda using
-# conda install -c cvxgrp cvxpy libgcc
-
-# Whether to use or omit CVXPY
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-class Algorithm(object):
- def __init__(self, *args, **kwargs):
- pass
- def set_up(self, *args, **kwargs):
- raise NotImplementedError()
- def update(self):
- raise NotImplementedError()
-
- def should_stop(self):
- raise NotImplementedError()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self.should_stop():
- raise StopIteration()
- else:
- self.update()
-
-class GradientDescent(Algorithm):
- x = None
- rate = 0
- objective_function = None
- regulariser = None
- iteration = 0
- stop_cryterion = 'max_iter'
- __max_iteration = 0
- __loss = []
- def __init__(self, **kwargs):
- args = ['x_init', 'objective_function', 'rate']
- present = True
- for k,v in kwargs.items():
- if k in args:
- args.pop(args.index(k))
- if len(args) == 0:
- return self.set_up(x_init=kwargs['x_init'],
- objective_function=kwargs['objective_function'],
- rate=kwargs['rate'])
-
- def should_stop(self):
- return self.iteration >= self.max_iteration
-
- def set_up(self, x_init, objective_function, rate):
- self.x = x_init.copy()
- self.x_update = x_init.copy()
- self.objective_function = objective_function
- self.rate = rate
- self.__loss.append(objective_function(x_init))
-
- def update(self):
-
- self.objective_function.gradient(self.x, out=self.x_update)
- self.x_update *= -self.rate
- self.x += self.x_update
- self.__loss.append(self.objective_function(self.x))
- self.iteration += 1
-
- def get_output(self):
- return self.x
- def get_current_loss(self):
- return self.__loss[-1]
- @property
- def loss(self):
- return self.__loss
- @property
- def max_iteration(self):
- return self.__max_iteration
- @max_iteration.setter
- def max_iteration(self, value):
- assert isinstance(value, int)
- self.__max_iteration = value
-
-
-
-
-
-# Problem data.
-m = 30
-n = 20
-np.random.seed(1)
-Amat = np.random.randn(m, n)
-A = LinearOperatorMatrix(Amat)
-bmat = np.random.randn(m)
-bmat.shape = (bmat.shape[0],1)
-
-# A = Identity()
-# Change n to equal to m.
-
-b = DataContainer(bmat)
-
-# Regularization parameter
-lam = 10
-opt = {'memopt':True}
-# Create object instances with the test data A and b.
-f = Norm2sq(A,b,c=0.5, memopt=True)
-g0 = ZeroFun()
-
-# Initial guess
-x_init = DataContainer(np.zeros((n,1)))
-
-f.grad(x_init)
-
-# Run FISTA for least squares plus zero function.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt)
-
-# Print solution and final objective/criterion value for comparison
-print("FISTA least squares plus zero function solution and objective value:")
-print(x_fista0.array)
-print(criter0[-1])
-
-gd = GradientDescent(x_init=x_init, objective_function=f, rate=0.001)
-gd.max_iteration = 5000
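-# The fixed rate of 0.001 is hand-tuned for this problem; for f(x) = 0.5*||Ax-b||^2
-# any step below 1/L, with L the largest eigenvalue of A^T A, would also be a
-# safe choice (a standard sufficient condition for gradient descent).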
-
-for i,el in enumerate(gd):
- if i%100 == 0:
- print ("\rIteration {} Loss: {}".format(gd.iteration,
- gd.get_current_loss()))
-
-
-#%%
-
-
-#
-#if use_cvxpy:
-# # Compare to CVXPY
-#
-# # Construct the problem.
-# x0 = Variable(n)
-# objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) )
-# prob0 = Problem(objective0)
-#
-# # The optimal objective is returned by prob.solve().
-# result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9)
-#
-# # The optimal solution for x is stored in x.value and optimal objective value
-# # is in result as well as in objective.value
-# print("CVXPY least squares plus zero function solution and objective value:")
-# print(x0.value)
-# print(objective0.value)
-#
-## Plot criterion curve to see FISTA converge to same value as CVX.
-#iternum = np.arange(1,1001)
-#plt.figure()
-#plt.loglog(iternum[[0,-1]],[objective0.value, objective0.value], label='CVX LS')
-#plt.loglog(iternum,criter0,label='FISTA LS')
-#plt.legend()
-#plt.show()
-#
-## Create 1-norm object instance
-#g1 = Norm1(lam)
-#
-#g1(x_init)
-#x_rand = DataContainer(np.reshape(np.random.rand(n),(n,1)))
-#x_rand2 = DataContainer(np.reshape(np.random.rand(n-1),(n-1,1)))
-#v = g1.prox(x_rand,0.02)
-##vv = g1.prox(x_rand2,0.02)
-#vv = v.copy()
-#vv *= 0
-#print (">>>>>>>>>>vv" , vv.as_array())
-#vv.fill(v)
-#print (">>>>>>>>>>fill" , vv.as_array())
-#g1.proximal(x_rand, 0.02, out=vv)
-#print (">>>>>>>>>>v" , v.as_array())
-#print (">>>>>>>>>>gradient" , vv.as_array())
-#
-#print (">>>>>>>>>>" , (v-vv).as_array())
-#import sys
-##sys.exit(0)
-## Combine with least squares and solve using generic FISTA implementation
-#x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt)
-#
-## Print for comparison
-#print("FISTA least squares plus 1-norm solution and objective value:")
-#print(x_fista1)
-#print(criter1[-1])
-#
-#if use_cvxpy:
-# # Compare to CVXPY
-#
-# # Construct the problem.
-# x1 = Variable(n)
-# objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) )
-# prob1 = Problem(objective1)
-#
-# # The optimal objective is returned by prob.solve().
-# result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9)
-#
-# # The optimal solution for x is stored in x.value and optimal objective value
-# # is in result as well as in objective.value
-# print("CVXPY least squares plus 1-norm solution and objective value:")
-# print(x1.value)
-# print(objective1.value)
-#
-## Now try another algorithm FBPD for same problem:
-#x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,Identity(), None, f, g1)
-#print(x_fbpd1)
-#print(criterfbpd1[-1])
-#
-## Plot criterion curve to see both FISTA and FBPD converge to same value.
-## Note that FISTA is very efficient for 1-norm minimization so it beats
-## FBPD in this test by a lot. But FBPD can handle a larger class of problems
-## than FISTA can.
-#plt.figure()
-#plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
-#plt.loglog(iternum,criter1,label='FISTA LS+1')
-#plt.legend()
-#plt.show()
-#
-#plt.figure()
-#plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
-#plt.loglog(iternum,criter1,label='FISTA LS+1')
-#plt.loglog(iternum,criterfbpd1,label='FBPD LS+1')
-#plt.legend()
-#plt.show()
-
-# Now try 1-norm and TV denoising with FBPD, first 1-norm.
-
-# Set up phantom size NxN by creating ImageGeometry, initialising the
-# ImageData object with this geometry and empty array and finally put some
-# data into its array, and display as image.
-N = 64
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Identity operator for denoising
-I = TomoIdentity(ig)
-
-# Data and add noise
-y = I.direct(Phantom)
-y.array = y.array + 0.1*np.random.randn(N, N)
-
-plt.imshow(y.array)
-plt.title('Noisy image')
-plt.show()
-
-
-###################
-# Data fidelity term
-f_denoise = Norm2sq(I,y,c=0.5,memopt=True)
-
-# 1-norm regulariser
-lam1_denoise = 1.0
-g1_denoise = Norm1(lam1_denoise)
-
-# Initial guess
-x_init_denoise = ImageData(np.zeros((N,N)))
-
-# Combine with least squares and solve using generic FISTA implementation
-x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = \
- FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
-
-print(x_fista1_denoise)
-print(criter1_denoise[-1])
-
-gd = GradientDescent(x_init=x_init_denoise,
- objective_function=f_denoise, rate=0.001)
-gd.max_iteration = 5000
-
-for i,el in enumerate(gd):
- if i%100 == 0:
- print ("\rIteration {} Loss: {}".format(gd.iteration,
- gd.get_current_loss()))
-
-plt.imshow(gd.get_output().as_array())
-plt.title('GD image')
-plt.show()
-
diff --git a/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py b/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py
deleted file mode 100644
index 8370c78..0000000
--- a/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# This script demonstrates how to load IMAT fits data
-# into the CIL optimisation framework and run reconstruction methods.
-#
-# Demo to reconstruct energy-discretized channels of IMAT data
-
-# needs dxchange: conda install -c conda-forge dxchange
-# needs astropy: conda install astropy
-
-
-# All third-party imports.
-import numpy as np
-import matplotlib.pyplot as plt
-from dxchange.reader import read_fits
-from astropy.io import fits
-
-# All own imports.
-from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData, DataContainer
-from ccpi.astra.ops import AstraProjectorSimple, AstraProjector3DSimple
-from ccpi.optimisation.algs import CGLS, FISTA
-from ccpi.optimisation.funcs import Norm2sq, Norm1
-from ccpi.plugins.regularisers import FGP_TV
-
-# set main parameters here
-n = 512
-totalAngles = 250 # total number of projection angles
-# spectral discretization parameter
-num_average = 145 # number of raw energy channels averaged into each output channel
-numChannels = 2970 # 2970
-totChannels = round(numChannels/num_average) # the resulting number of channels
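-# With the defaults above this gives round(2970/145) = round(20.48...) = 20
-# output channels, each an average of 145 raw energy channels.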
-Projections_stack = np.zeros((num_average,n,n),dtype='uint16')
-ProjAngleChannels = np.zeros((totalAngles,totChannels,n,n),dtype='float32')
-
-#########################################################################
-print ("Loading the data...")
-MainPath = '/media/jakob/050d8d45-fab3-4285-935f-260e6c5f162c1/Data/neutrondata/' # path to data
-pathname0 = '{!s}{!s}'.format(MainPath,'PSI_phantom_IMAT/DATA/Sample/')
-counterFileName = 4675
-# A main loop over all available angles
-for ll in range(0,totalAngles,1):
- pathnameData = '{!s}{!s}{!s}/'.format(pathname0,'angle',str(ll))
- filenameCurr = '{!s}{!s}{!s}'.format('IMAT0000',str(counterFileName),'_Tomo_test_000_')
- counterT = 0
- # loop over reduced channels (discretized)
- for i in range(0,totChannels,1):
- sumCount = 0
- # loop over actual channels to obtain averaged one
- for j in range(0,num_average,1):
- if counterT < 10:
- outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'0000',str(counterT))
- if ((counterT >= 10) & (counterT < 100)):
- outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'000',str(counterT))
- if ((counterT >= 100) & (counterT < 1000)):
- outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'00',str(counterT))
- if ((counterT >= 1000) & (counterT < 10000)):
- outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'0',str(counterT))
- try:
- Projections_stack[j,:,:] = read_fits(outfile)
- except:
- print("Fits is corrupted, skipping no.", counterT)
- sumCount -= 1
- counterT += 1
- sumCount += 1
- AverageProj=np.sum(Projections_stack,axis=0)/sumCount # averaged projection over "num_average" channels
- ProjAngleChannels[ll,i,:,:] = AverageProj
- print("Angle is processed", ll)
- counterFileName += 1
-#########################################################################
-
-flat1 = read_fits('{!s}{!s}{!s}'.format(MainPath,'PSI_phantom_IMAT/DATA/','OpenBeam_aft1/IMAT00004932_Tomo_test_000_SummedImg.fits'))
-nonzero = flat1 > 0
-# Apply flat field and take negative log
-for ll in range(0,totalAngles,1):
- for i in range(0,totChannels,1):
- ProjAngleChannels[ll,i,nonzero] = ProjAngleChannels[ll,i,nonzero]/flat1[nonzero]
-
-eqzero = ProjAngleChannels == 0
-ProjAngleChannels[eqzero] = 1
-ProjAngleChannels_NormLog = -np.log(ProjAngleChannels) # normalised and neg-log data
-
-# extract the sinogram over energy channels
-selectedVertical_slice = 256
-sino_all_channels = ProjAngleChannels_NormLog[:,:,:,selectedVertical_slice]
-# Set angles to use
-angles = np.linspace(-np.pi,np.pi,totalAngles,endpoint=False)
-
-# set the geometry
-ig = ImageGeometry(n,n)
-ag = AcquisitionGeometry('parallel',
- '2D',
- angles,
- n,1)
-Aop = AstraProjectorSimple(ig, ag, 'gpu')
-
-
-# loop to reconstruct energy channels
-REC_chan = np.zeros((totChannels, n, n), 'float32')
-for i in range(0,totChannels,1):
- sino_channel = sino_all_channels[:,i,:] # extract a sinogram for i-th channel
-
- print ("Initial guess")
- x_init = ImageData(geometry=ig)
-
- # Create least squares object instance with projector and data.
- print ("Create least squares object instance with projector and data.")
- f = Norm2sq(Aop,DataContainer(sino_channel),c=0.5)
-
- print ("Run FISTA-TV for least squares")
- lamtv = 5
- opt = {'tol': 1e-4, 'iter': 200}
- g_fgp = FGP_TV(lambdaReg = lamtv,
- iterationsTV=50,
- tolerance=1e-6,
- methodTV=0,
- nonnegativity=0,
- printing=0,
- device='gpu')
-
- x_fista_fgp, it1, timing1, criter_fgp = FISTA(x_init, f, g_fgp, opt)
- REC_chan[i,:,:] = x_fista_fgp.array
- """
- plt.figure()
- plt.subplot(121)
- plt.imshow(x_fista_fgp.array, vmin=0, vmax=0.05)
- plt.title('FISTA FGP TV')
- plt.subplot(122)
- plt.semilogy(criter_fgp)
- plt.show()
- """
- """
- print ("Run CGLS for least squares")
- opt = {'tol': 1e-4, 'iter': 20}
- x_init = ImageData(geometry=ig)
- x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, DataContainer(sino_channel), opt=opt)
-
- plt.figure()
- plt.imshow(x_CGLS.array,vmin=0, vmax=0.05)
- plt.title('CGLS')
- plt.show()
- """
-# Saving images into fits using astropy if required
-counter = 0
-filename = 'FISTA_TV_imat_slice'
-for i in range(totChannels):
- im = REC_chan[i,:,:]
- add_val = np.min(im[:])
- im += abs(add_val)
- im = im/np.max(im[:])*65535
- outfile = '{!s}_{!s}_{!s}.fits'.format(filename,str(selectedVertical_slice),str(counter))
- hdu = fits.PrimaryHDU(np.uint16(im))
- hdu.writeto(outfile, overwrite=True)
- counter += 1 \ No newline at end of file
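The loop earlier in this script builds each averaged channel by hand: an if-chain pads the FITS index to five digits and a bare counter guards against corrupted files. A minimal sketch of the same step using str.zfill and a division-by-zero guard (average_block is a hypothetical helper; pathnameData, filenameCurr and read_fits are the names used in the script above):

import numpy as np
from dxchange.reader import read_fits

def average_block(pathnameData, filenameCurr, first_index, num_average, shape):
    """Average `num_average` consecutive channel images, skipping unreadable files."""
    acc = np.zeros(shape, dtype='float32')
    good = 0
    for k in range(first_index, first_index + num_average):
        outfile = '{}{}{}.fits'.format(pathnameData, filenameCurr, str(k).zfill(5))
        try:
            acc += read_fits(outfile)
            good += 1
        except Exception:
            print("FITS file is corrupted, skipping no.", k)
    # guard: if every file in the block was unreadable, return the zero image
    return acc / good if good > 0 else acc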
diff --git a/Wrappers/Python/wip/demo_imat_whitebeam.py b/Wrappers/Python/wip/demo_imat_whitebeam.py
deleted file mode 100644
index e0d213e..0000000
--- a/Wrappers/Python/wip/demo_imat_whitebeam.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# This script demonstrates how to load IMAT fits data
-# into the CIL optimisation framework and run reconstruction methods.
-#
-# This demo loads the summedImg files which are the non-spectral images
-# resulting from summing projections over all spectral channels.
-
-# needs dxchange: conda install -c conda-forge dxchange
-# needs astropy: conda install astropy
-
-
-# All third-party imports.
-import numpy
-from scipy.io import loadmat
-import matplotlib.pyplot as plt
-from dxchange.reader import read_fits
-
-# All own imports.
-from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData
-from ccpi.astra.ops import AstraProjectorSimple, AstraProjector3DSimple
-from ccpi.optimisation.algs import CGLS, FISTA
-from ccpi.optimisation.funcs import Norm2sq, Norm1
-
-# Load and display a couple of summed projections as examples
-pathname0 = '/media/newhd/shared/Data/neutrondata/PSI_phantom_IMAT/DATA/Sample/angle0/'
-filename0 = 'IMAT00004675_Tomo_test_000_SummedImg.fits'
-
-data0 = read_fits(pathname0 + filename0)
-
-pathname10 = '/media/newhd/shared/Data/neutrondata/PSI_phantom_IMAT/DATA/Sample/angle10/'
-filename10 = 'IMAT00004685_Tomo_test_000_SummedImg.fits'
-
-data10 = read_fits(pathname10 + filename10)
-
-# Load a flat field (more are available, should we average over them?)
-flat1 = read_fits('/media/newhd/shared/Data/neutrondata/PSI_phantom_IMAT/DATA/OpenBeam_aft1/IMAT00004932_Tomo_test_000_SummedImg.fits')
-
-# Apply flat field and display after flat-field correction and negative log
-data0_rel = numpy.zeros(numpy.shape(flat1), dtype = float)
-nonzero = flat1 > 0
-data0_rel[nonzero] = data0[nonzero] / flat1[nonzero]
-data10_rel = numpy.zeros(numpy.shape(flat1), dtype = float)
-data10_rel[nonzero] = data10[nonzero] / flat1[nonzero]
-
-plt.imshow(data0_rel)
-plt.colorbar()
-plt.show()
-
-plt.imshow(-numpy.log(data0_rel))
-plt.colorbar()
-plt.show()
-
-plt.imshow(data10_rel)
-plt.colorbar()
-plt.show()
-
-plt.imshow(-numpy.log(data10_rel))
-plt.colorbar()
-plt.show()
-
-# Set up for loading all summed images at 250 angles.
-pathname = '/media/newhd/shared/Data/neutrondata/PSI_phantom_IMAT/DATA/Sample/angle{}/'
-filename = 'IMAT0000{}_Tomo_test_000_SummedImg.fits'
-
-# Dimensions
-num_angles = 250
-imsize = 512
-
-# Initialise array
-data = numpy.zeros((num_angles,imsize,imsize))
-
-# Load only 0-249, as image 250 is a repetition of zero degrees, just like image 0
-for i in range(0,250):
- curimfile = (pathname + filename).format(i, i+4675)
- data[i,:,:] = read_fits(curimfile)
-
-# Apply flat field and take negative log
-nonzero = flat1 > 0
-for i in range(0,250):
- data[i,nonzero] = data[i,nonzero]/flat1[nonzero]
-
-eqzero = data == 0
-data[eqzero] = 1
-
-data_rel = -numpy.log(data)
-
-# Permute order to get: angles, vertical, horizontal, as default in framework.
-data_rel = numpy.transpose(data_rel,(0,2,1))
-
-# Set angles to use
-angles = numpy.linspace(-numpy.pi,numpy.pi,num_angles,endpoint=False)
-
-# Create 3D acquisition geometry and acquisition data
-ag = AcquisitionGeometry('parallel',
- '3D',
- angles,
- pixel_num_h=imsize,
- pixel_num_v=imsize)
-b = AcquisitionData(data_rel, geometry=ag)
-
-# Reduce to single (noncentral) slice by extracting relevant parameters from data and its
-# geometry. Perhaps create function to extract central slice automatically?
-b2d = b.subset(vertical=128)
-ag2d = AcquisitionGeometry('parallel',
- '2D',
- ag.angles,
- pixel_num_h=ag.pixel_num_h)
-b2d.geometry = ag2d
-
-# Create 2D image geometry
-ig2d = ImageGeometry(voxel_num_x=ag2d.pixel_num_h,
- voxel_num_y=ag2d.pixel_num_h)
-
-# Create GPU projector/backprojector operator with ASTRA.
-Aop = AstraProjectorSimple(ig2d,ag2d,'gpu')
-
-# Demonstrate that the operator works by applying a simple backprojection.
-z = Aop.adjoint(b2d)
-plt.imshow(z.array)
-plt.title('Simple backprojection')
-plt.colorbar()
-plt.show()
-
-# Set initial guess ImageData with zeros for algorithms, and algorithm options.
-x_init = ImageData(numpy.zeros((imsize,imsize)),
- geometry=ig2d)
-opt_CGLS = {'tol': 1e-4, 'iter': 20}
-
-# Run CGLS algorithm and display reconstruction.
-x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, b2d, opt_CGLS)
-
-plt.imshow(x_CGLS.array)
-plt.title('CGLS')
-plt.colorbar()
-plt.show()
-
-plt.semilogy(criter_CGLS)
-plt.title('CGLS Criterion vs iterations')
-plt.show() \ No newline at end of file
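The flat-field division and negative log above reappear in several of these demos; a hedged, self-contained NumPy sketch of that preprocessing (normalise_neglog and its arguments are illustrative names, not framework API):

import numpy as np

def normalise_neglog(projections, flat):
    """Divide by the flat field where it is positive, then take the negative log.

    Zeros are replaced by 1 before the log, as in the script above, so that
    unnormalised pixels map to 0 in the corrected data."""
    out = np.zeros_like(projections, dtype=float)
    nonzero = flat > 0
    out[..., nonzero] = projections[..., nonzero] / flat[nonzero]
    out[out == 0] = 1
    return -np.log(out)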
diff --git a/Wrappers/Python/wip/demo_memhandle.py b/Wrappers/Python/wip/demo_memhandle.py
deleted file mode 100755
index db48d73..0000000
--- a/Wrappers/Python/wip/demo_memhandle.py
+++ /dev/null
@@ -1,193 +0,0 @@
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
-from ccpi.optimisation.algs import FISTA, FBPD, CGLS
-from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D
-
-from ccpi.optimisation.ops import LinearOperatorMatrix, Identity
-from ccpi.optimisation.ops import TomoIdentity
-
-# Requires CVXPY, see http://www.cvxpy.org/
-# CVXPY can be installed in anaconda using
-# conda install -c cvxgrp cvxpy libgcc
-
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-# Problem data.
-m = 30
-n = 20
-np.random.seed(1)
-Amat = np.random.randn(m, n)
-A = LinearOperatorMatrix(Amat)
-bmat = np.random.randn(m)
-bmat.shape = (bmat.shape[0],1)
-
-
-
-# A = Identity()
-# Change n to equal to m.
-
-b = DataContainer(bmat)
-
-# Regularization parameter
-lam = 10
-
-# Create object instances with the test data A and b.
-f = Norm2sq(A,b,c=0.5, memopt=True)
-g0 = ZeroFun()
-
-# Initial guess
-x_init = DataContainer(np.zeros((n,1)))
-
-f.grad(x_init)
-opt = {'memopt': True}
-# Run FISTA for least squares plus zero function.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0)
-x_fista0_m, it0_m, timing0_m, criter0_m = FISTA(x_init, f, g0, opt=opt)
-
-iternum = [i for i in range(len(criter0))]
-# Print solution and final objective/criterion value for comparison
-print("FISTA least squares plus zero function solution and objective value:")
-print(x_fista0.array)
-print(criter0[-1])
-
-
-# Plot criterion curves to compare FISTA with and without memopt.
-#iternum = np.arange(1,1001)
-plt.figure()
-plt.loglog(iternum,criter0,label='FISTA LS')
-plt.loglog(iternum,criter0_m,label='FISTA LS memopt')
-plt.legend()
-plt.show()
-#%%
-# Create 1-norm object instance
-g1 = Norm1(lam)
-
-g1(x_init)
-g1.prox(x_init,0.02)
-
-# Combine with least squares and solve using generic FISTA implementation
-x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1)
-x_fista1_m, it1_m, timing1_m, criter1_m = FISTA(x_init, f, g1, opt=opt)
-iternum = [i for i in range(len(criter1))]
-# Print for comparison
-print("FISTA least squares plus 1-norm solution and objective value:")
-print(x_fista1)
-print(criter1[-1])
-
-
-# Now try another algorithm FBPD for same problem:
-x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1)
-iternum = [i for i in range(len(criterfbpd1))]
-print(x_fbpd1)
-print(criterfbpd1[-1])
-
-# Plot criterion curve to see both FISTA and FBPD converge to same value.
-# Note that FISTA is very efficient for 1-norm minimization so it beats
-# FBPD in this test by a lot. But FBPD can handle a larger class of problems
-# than FISTA can.
-plt.figure()
-plt.loglog(iternum,criter1,label='FISTA LS+1')
-plt.loglog(iternum,criter1_m,label='FISTA LS+1 memopt')
-plt.legend()
-plt.show()
-
-plt.figure()
-plt.loglog(iternum,criter1,label='FISTA LS+1')
-plt.loglog(iternum,criterfbpd1,label='FBPD LS+1')
-plt.legend()
-plt.show()
-#%%
-# Now try 1-norm and TV denoising with FBPD, first 1-norm.
-
-# Set up phantom size NxN by creating ImageGeometry, initialising the
-# ImageData object with this geometry and empty array and finally put some
-# data into its array, and display as image.
-N = 1000
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Identity operator for denoising
-I = TomoIdentity(ig)
-
-# Data and add noise
-y = I.direct(Phantom)
-y.array += 0.1*np.random.randn(N, N)
-
-plt.figure()
-plt.imshow(y.array)
-plt.title('Noisy image')
-plt.show()
-
-# Data fidelity term
-f_denoise = Norm2sq(I,y,c=0.5, memopt=True)
-
-# 1-norm regulariser
-lam1_denoise = 1.0
-g1_denoise = Norm1(lam1_denoise)
-
-# Initial guess
-x_init_denoise = ImageData(np.zeros((N,N)))
-opt = {'memopt': False, 'iter' : 50}
-# Combine with least squares and solve using generic FISTA implementation
-print ("no memopt")
-x_fista1_denoise, it1_denoise, timing1_denoise, \
- criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
-opt = {'memopt': True, 'iter' : 50}
-print ("yes memopt")
-x_fista1_denoise_m, it1_denoise_m, timing1_denoise_m, \
- criter1_denoise_m = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
-
-print(x_fista1_denoise)
-print(criter1_denoise[-1])
-
-plt.figure()
-plt.imshow(x_fista1_denoise.as_array())
-plt.title('FISTA LS+1')
-plt.show()
-
-plt.figure()
-plt.imshow(x_fista1_denoise_m.as_array())
-plt.title('FISTA LS+1 memopt')
-plt.show()
-
-plt.figure()
-iternum = [i for i in range(len(criter1_denoise))]
-plt.loglog(iternum,criter1_denoise,label='FISTA LS+1')
-plt.loglog(iternum,criter1_denoise_m,label='FISTA LS+1 memopt')
-plt.legend()
-plt.show()
-#%%
-# Now denoise LS + 1-norm with FBPD
-x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise)
-print(x_fbpd1_denoise)
-print(criterfbpd1_denoise[-1])
-
-plt.figure()
-plt.imshow(x_fbpd1_denoise.as_array())
-plt.title('FBPD LS+1')
-plt.show()
-
-
-# Now TV with FBPD
-lam_tv = 0.1
-gtv = TV2D(lam_tv)
-gtv(gtv.op.direct(x_init_denoise))
-
-opt_tv = {'tol': 1e-4, 'iter': 10000}
-
-x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv)
-print(x_fbpdtv_denoise)
-print(criterfbpdtv_denoise[-1])
-
-plt.imshow(x_fbpdtv_denoise.as_array())
-plt.title('FBPD TV')
-plt.show()
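The memopt flag exercised above asks the framework to reuse pre-allocated buffers instead of creating a new array at every step. Reduced to plain NumPy, the idea looks like this (a sketch only; gradient_step_inplace is not a framework function):

import numpy as np

def gradient_step_inplace(A, b, x, scratch, rate):
    """One least-squares gradient step, x <- x - rate * A^T (A x - b),
    reusing `scratch` for the residual instead of allocating a new array."""
    np.matmul(A, x, out=scratch)   # scratch = A x
    scratch -= b                   # scratch = A x - b
    x -= rate * (A.T @ scratch)    # the A.T product still allocates; kept simple
    return x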
diff --git a/Wrappers/Python/wip/fix_test.py b/Wrappers/Python/wip/fix_test.py
deleted file mode 100755
index b1006c0..0000000
--- a/Wrappers/Python/wip/fix_test.py
+++ /dev/null
@@ -1,208 +0,0 @@
-import numpy as np
-import numpy
-from ccpi.optimisation.operators import *
-from ccpi.optimisation.algorithms import *
-from ccpi.optimisation.functions import *
-from ccpi.framework import *
-
-def isSizeCorrect(data1 ,data2):
- if issubclass(type(data1), DataContainer) and \
- issubclass(type(data2), DataContainer):
- # check dimensionality
- if data1.check_dimensions(data2):
- return True
- elif issubclass(type(data1) , numpy.ndarray) and \
- issubclass(type(data2) , numpy.ndarray):
- return data1.shape == data2.shape
- else:
- raise ValueError("{0}: getting two incompatible types: {1} {2}"\
- .format('Function', type(data1), type(data2)))
- return False
-
-class Norm1(Function):
-
- def __init__(self,gamma):
- super(Norm1, self).__init__()
- self.gamma = gamma
- self.L = 1
- self.sign_x = None
-
- def __call__(self,x,out=None):
- if out is None:
- return self.gamma*(x.abs().sum())
- else:
- if not x.shape == out.shape:
- raise ValueError('Norm1 Incompatible size:',
- x.shape, out.shape)
- x.abs(out=out)
- return out.sum() * self.gamma
-
- def prox(self,x,tau):
- return (x.abs() - tau*self.gamma).maximum(0) * x.sign()
-
- def proximal(self, x, tau, out=None):
- if out is None:
- return self.prox(x, tau)
- else:
- if isSizeCorrect(x,out):
- # check dimensionality
- if issubclass(type(out), DataContainer):
- v = (x.abs() - tau*self.gamma).maximum(0)
- x.sign(out=out)
- out *= v
- #out.fill(self.prox(x,tau))
- elif issubclass(type(out) , numpy.ndarray):
- v = (x.abs() - tau*self.gamma).maximum(0)
- out[:] = x.sign()
- out *= v
- #out[:] = self.prox(x,tau)
- else:
- raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )
-
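# The prox of Norm1 above is elementwise soft thresholding.  A NumPy
# cross-check of the same formula (an illustrative sketch; soft_threshold is
# not part of the original script):
import numpy as np

def soft_threshold(x, t):
    """prox of t*||.||_1: shrink magnitudes by t, keep signs."""
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)

# e.g. soft_threshold(np.array([-2.0, 0.3, 1.5]), 1.0) -> [-1., 0., 0.5]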
-opt = {'memopt': True}
-# Problem data.
-m = 500
-n = 200
-
-# if m < n then the problem is under-determined and algorithms will struggle to find a solution.
-# One approach is to add regularisation
-
-#np.random.seed(1)
-Amat = np.asarray( np.random.randn(m, n), dtype=numpy.float32)
-Amat = np.asarray( np.random.randint(1, 11, (m, n)), dtype=numpy.float32)
-#Amat = np.asarray(np.eye(m), dtype=np.float32) * 2
-A = LinearOperatorMatrix(Amat)
-bmat = np.asarray( np.random.randn(m), dtype=numpy.float32)
-#bmat *= 0
-#bmat += 2
-print ("bmat", bmat.shape)
-print ("A", A.A)
-#bmat.shape = (bmat.shape[0], 1)
-
-# A = Identity()
-# Change n to equal to m.
-vgb = VectorGeometry(m)
-vgx = VectorGeometry(n)
-b = vgb.allocate(VectorGeometry.RANDOM_INT, dtype=numpy.float32)
-# b.fill(bmat)
-#b = DataContainer(bmat)
-
-# Regularization parameter
-lam = 10
-opt = {'memopt': True}
-# Create object instances with the test data A and b.
-f = Norm2Sq(A, b, c=1., memopt=True)
-#f = FunctionOperatorComposition(A, L2NormSquared(b=bmat))
-g0 = ZeroFunction()
-
-#f.L = 30.003
-x_init = vgx.allocate(VectorGeometry.RANDOM, dtype=numpy.float32)
-x_initcgls = x_init.copy()
-
-a = VectorData(x_init.as_array(), deep_copy=True)
-
-assert id(x_init.as_array()) != id(a.as_array())
-
-
-#f.L = LinearOperator.PowerMethod(A, 25, x_init)[0]
-#print ('f.L', f.L)
-rate = (1 / f.L) / 6
-#f.L *= 12
-
-# Initial guess
-#x_init = DataContainer(np.zeros((n, 1)))
-print ('x_init', x_init.as_array())
-print ('b', b.as_array())
-# Create 1-norm object instance
-g1_new = lam * L1Norm()
-g1 = Norm1(lam)
-
-g1 = ZeroFunction()
-#g1(x_init)
-x = g1.prox(x_init, 1/f.L )
-print ("g1.proximal ", x.as_array())
-
-x = g1.prox(x_init, 0.03 )
-print ("g1.proximal ", x.as_array())
-x = g1_new.proximal(x_init, 0.03 )
-print ("g1.proximal ", x.as_array())
-
-x1 = vgx.allocate(VectorGeometry.RANDOM, dtype=numpy.float32)
-pippo = vgx.allocate()
-
-print ("x_init", x_init.as_array())
-print ("x1", x1.as_array())
-a = x_init.subtract(x1, out=pippo)
-
-print ("pippo", pippo.as_array())
-print ("x_init", x_init.as_array())
-print ("x1", x1.as_array())
-
-y = A.direct(x_init)
-y *= 0
-A.direct(x_init, out=y)
-
-# Combine with least squares and solve using generic FISTA implementation
-#x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt)
-def callback(it, objective, solution):
- print ("callback " , it , objective, f(solution))
-
-fa = FISTA(x_init=x_init, f=f, g=g1)
-fa.max_iteration = 1000
-fa.update_objective_interval = int( fa.max_iteration / 10 )
-fa.run(fa.max_iteration, callback = None, verbose=True)
-
-gd = GradientDescent(x_init=x_init, objective_function=f, rate = rate )
-gd.max_iteration = 5000
-gd.update_objective_interval = int( gd.max_iteration / 10 )
-gd.run(gd.max_iteration, callback = None, verbose=True)
-
-
-
-cgls = CGLS(x_init= x_initcgls, operator=A, data=b)
-cgls.max_iteration = 1000
-cgls.update_objective_interval = int( cgls.max_iteration / 10 )
-
-#cgls.should_stop = stop_criterion(cgls)
-cgls.run(cgls.max_iteration, callback = callback, verbose=True)
-
-
-
-# Print for comparison
-print("FISTA least squares plus 1-norm solution and objective value:")
-print(fa.get_output().as_array())
-print(fa.get_last_objective())
-
-print ("data ", b.as_array())
-print ('FISTA ', A.direct(fa.get_output()).as_array())
-print ('GradientDescent', A.direct(gd.get_output()).as_array())
-print ('CGLS ', A.direct(cgls.get_output()).as_array())
-
-cond = numpy.linalg.cond(A.A)
-
-print ("cond" , cond)
-
-#%%
-try:
- import cvxpy as cp
- # Construct the problem.
- x = cp.Variable(n)
-    objective = cp.Minimize(cp.sum_squares(A.A @ x - b.as_array()))
- prob = cp.Problem(objective)
- # The optimal objective is returned by prob.solve().
- result = prob.solve(solver = cp.MOSEK)
-
- print ('CGLS ', cgls.get_output().as_array())
- print ('CVX ', x.value)
-
- print ('FISTA ', fa.get_output().as_array())
- print ('GD ', gd.get_output().as_array())
-except ImportError as ir:
- pass
-
- #%%
-
-
-
-
-
diff --git a/Wrappers/Python/wip/multifile_nexus.py b/Wrappers/Python/wip/multifile_nexus.py
deleted file mode 100755
index d1ad438..0000000
--- a/Wrappers/Python/wip/multifile_nexus.py
+++ /dev/null
@@ -1,307 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Aug 15 16:00:53 2018
-
-@author: ofn77899
-"""
-
-import os
-from ccpi.io.reader import NexusReader
-
-from sys import getsizeof
-
-import matplotlib.pyplot as plt
-
-from ccpi.framework import DataProcessor, DataContainer
-from ccpi.processors import Normalizer
-from ccpi.processors import CenterOfRotationFinder
-import numpy
-
-
-class averager(object):
- def __init__(self):
- self.reset()
-
- def reset(self):
- self.N = 0
- self.avg = 0
- self.min = 0
- self.max = 0
- self.var = 0
- self.ske = 0
- self.kur = 0
-
- def add_reading(self, val):
-
- if (self.N == 0):
- self.avg = val;
- self.min = val;
- self.max = val;
- elif (self.N == 1):
- #//set min/max
- self.max = val if val > self.max else self.max
- self.min = val if val < self.min else self.min
-
-
- thisavg = (self.avg + val)/2
- #// initial value is in avg
- self.var = (self.avg - thisavg)*(self.avg-thisavg) + (val - thisavg) * (val-thisavg)
- self.ske = self.var * (self.avg - thisavg)
- self.kur = self.var * self.var
- self.avg = thisavg
- else:
- self.max = val if val > self.max else self.max
- self.min = val if val < self.min else self.min
-
- M = self.N
-
- #// b-factor =(<v>_N + v_(N+1)) / (N+1)
- #float b = (val -avg) / (M+1);
- b = (val -self.avg) / (M+1)
-
- self.kur = self.kur + (M *(b*b*b*b) * (1+M*M*M))- (4* b * self.ske) + (6 * b *b * self.var * (M-1))
-
- self.ske = self.ske + (M * b*b*b *(M-1)*(M+1)) - (3 * b * self.var * (M-1))
-
- #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ;
- self.var = self.var * ((M-1)/M) + (b * b * (M+1))
-
- self.avg = self.avg * (M/(M+1)) + val / (M+1)
-
- self.N += 1
-
- def stats(self, vector):
- i = 0
- while i < vector.size:
- self.add_reading(vector[i])
- i+=1
-
-avg = averager()
-a = numpy.linspace(0,39,40)
-avg.stats(a)
-print ("average" , avg.avg, a.mean())
-print ("variance" , avg.var, a.var())
-b = a - a.mean()
-b *= b
-b = numpy.sqrt(sum(b)/(a.size-1))
-print ("std" , numpy.sqrt(avg.var), b)
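# The averager above accumulates running moments in a single pass.  The
# textbook form of the same idea is Welford's algorithm; a small sketch for
# comparison (welford is illustrative and not used by the script):
def welford(values):
    """Return (mean, sample variance) computed in one pass."""
    n, mean, m2 = 0, 0.0, 0.0
    for v in values:
        n += 1
        delta = v - mean
        mean += delta / n
        m2 += delta * (v - mean)   # note: uses the updated mean
    return mean, (m2 / (n - 1) if n > 1 else 0.0)
# welford(range(40)) matches numpy.mean(a) and numpy.var(a, ddof=1) for a = numpy.linspace(0, 39, 40)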
-#%%
-
-class DataStatMoments(DataProcessor):
-    '''Accumulate statistical moments along a given axis
-
-    This processor reads in a DataContainer (e.g. AcquisitionData) and, in a
-    single pass over the chosen axis, accumulates the running mean, variance
-    and optionally the skewness and kurtosis of the slices.
-
-    Input: AcquisitionData (or DataContainer)
-    Parameters: axis along which to accumulate, skewness/kurtosis switches,
-                slice offset for batched processing
-    Output: DataContainer holding the accumulated moments
-    '''
-
- def __init__(self, axis, skewness=False, kurtosis=False, offset=0):
- kwargs = {
- 'axis' : axis,
- 'skewness' : skewness,
- 'kurtosis' : kurtosis,
- 'offset' : offset,
- }
- #DataProcessor.__init__(self, **kwargs)
- super(DataStatMoments, self).__init__(**kwargs)
-
-
- def check_input(self, dataset):
- #self.N = dataset.get_dimension_size(self.axis)
- return True
-
- @staticmethod
- def add_sample(dataset, N, axis, stats=None, offset=0):
- #dataset = self.get_input()
- if (N == 0):
- # get the axis number along to calculate the stats
-
-
- #axs = dataset.get_dimension_size(self.axis)
- # create a placeholder for the output
- if stats is None:
- ax = dataset.get_dimension_axis(axis)
- shape = [dataset.shape[i] for i in range(len(dataset.shape)) if i != ax]
- # output has 4 components avg, std, skewness and kurtosis + last avg+ (val-thisavg)
- shape.insert(0, 4+2)
- stats = numpy.zeros(shape)
-
- stats[0] = dataset.subset(**{axis:N-offset}).array[:]
-
- #avg = val
- elif (N == 1):
- # val
- stats[5] = dataset.subset(**{axis:N-offset}).array
- stats[4] = stats[0] + stats[5]
- stats[4] /= 2 # thisavg
- stats[5] -= stats[4] # (val - thisavg)
-
- #float thisavg = (avg + val)/2;
-
- #// initial value is in avg
- #var = (avg - thisavg)*(avg-thisavg) + (val - thisavg) * (val-thisavg);
- stats[1] = stats[5] * stats[5] + stats[5] * stats[5]
- #skewness = var * (avg - thisavg);
- stats[2] = stats[1] * stats[5]
- #kurtosis = var * var;
- stats[3] = stats[1] * stats[1]
- #avg = thisavg;
- stats[0] = stats[4]
- else:
-
- #float M = (float)N;
- M = N
- #val
- stats[4] = dataset.subset(**{axis:N-offset}).array
- #// b-factor =(<v>_N + v_(N+1)) / (N+1)
- stats[5] = stats[4] - stats[0]
- stats[5] /= (M+1) # b factor
- #float b = (val -avg) / (M+1);
-
- #kurtosis = kurtosis + (M *(b*b*b*b) * (1+M*M*M))- (4* b * skewness) + (6 * b *b * var * (M-1));
- #if self.kurtosis:
- # stats[3] += (M * stats[5] * stats[5] * stats[5] * stats[5]) - \
- # (4 * stats[5] * stats[2]) + \
- # (6 * stats[5] * stats[5] * stats[1] * (M-1))
-
- #skewness = skewness + (M * b*b*b *(M-1)*(M+1)) - (3 * b * var * (M-1));
- #if self.skewness:
- # stats[2] = stats[2] + (M * stats[5]* stats[5] * stats[5] * (M-1)*(M-1) ) -\
- # 3 * stats[5] * stats[1] * (M-1)
- #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ;
- #var = var * ((M-1)/M) + (b * b * (M+1));
- stats[1] = ((M-1)/M) * stats[1] + (stats[5] * stats[5] * (M+1))
-
- #avg = avg * (M/(M+1)) + val / (M+1)
- stats[0] = stats[0] * (M/(M+1)) + stats[4] / (M+1)
-
- N += 1
- return stats , N
-
-
- def process(self):
-
- data = self.get_input()
-
- #stats, i = add_sample(0)
- N = data.get_dimension_size(self.axis)
- ax = data.get_dimension_axis(self.axis)
- stats = None
- i = 0
- while i < N:
- stats , i = DataStatMoments.add_sample(data, i, self.axis, stats, offset=self.offset)
- self.offset += N
- labels = ['StatisticalMoments']
-
- labels += [data.dimension_labels[i] \
- for i in range(len(data.dimension_labels)) if i != ax]
- y = DataContainer( stats[:4] , False,
- dimension_labels=labels)
- return y
-
-directory = r'E:\Documents\Dataset\CCPi\Nexus_test'
-data_path="entry1/instrument/pco1_hw_hdf_nochunking/data"
-
-reader = NexusReader(os.path.join( os.path.abspath(directory) , '74331.nxs'))
-
-print ("read flat")
-read_flat = NexusReader(os.path.join( os.path.abspath(directory) , '74240.nxs'))
-read_flat.data_path = data_path
-flatsslice = read_flat.get_acquisition_data_whole()
-avg = DataStatMoments('angle')
-
-avg.set_input(flatsslice)
-flats = avg.get_output()
-
-ave = averager()
-ave.stats(flatsslice.array[:,0,0])
-
-print ("avg" , ave.avg, flats.array[0][0][0])
-print ("var" , ave.var, flats.array[1][0][0])
-
-print ("read dark")
-read_dark = NexusReader(os.path.join( os.path.abspath(directory) , '74243.nxs'))
-read_dark.data_path = data_path
-
-## there are many darks, so we process them in batches
-total_size = read_dark.get_projection_dimensions()[0]
-print ("total_size", total_size)
-
-batchsize = 40
-if total_size > batchsize:
- batchlimits = [batchsize * (i+1) for i in range(int(total_size/batchsize))] + [total_size-1]
-else:
- batchlimits = [total_size]
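# A more explicit way to generate non-overlapping batch bounds covering the
# whole dark stack (an illustrative sketch; batch_bounds is new and not used
# by the original loop below):
batch_bounds = [(start, min(start + batchsize, total_size))
                for start in range(0, total_size, batchsize)]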
-#avg.N = 0
-avg.offset = 0
-N = 0
-for batch in range(len(batchlimits)):
- print ("running batch " , batch)
- bmax = batchlimits[batch]
- bmin = bmax-batchsize
-
- darksslice = read_dark.get_acquisition_data_batch(bmin,bmax)
- if batch == 0:
- #create stats
- ax = darksslice.get_dimension_axis('angle')
- shape = [darksslice.shape[i] for i in range(len(darksslice.shape)) if i != ax]
- # output has 4 components avg, std, skewness and kurtosis + last avg+ (val-thisavg)
- shape.insert(0, 4+2)
- print ("create stats shape ", shape)
- stats = numpy.zeros(shape)
- print ("N" , N)
- #avg.set_input(darksslice)
- i = bmin
- while i < bmax:
- stats , i = DataStatMoments.add_sample(darksslice, i, 'angle', stats, bmin)
- print ("{0}-{1}-{2}".format(bmin, i , bmax ) )
-
-darks = stats
-#%%
-
-fig = plt.subplot(2,2,1)
-fig.imshow(flats.subset(StatisticalMoments=0).array)
-fig = plt.subplot(2,2,2)
-fig.imshow(numpy.sqrt(flats.subset(StatisticalMoments=1).array))
-fig = plt.subplot(2,2,3)
-fig.imshow(darks[0])
-fig = plt.subplot(2,2,4)
-fig.imshow(numpy.sqrt(darks[1]))
-plt.show()
-
-
-#%%
-norm = Normalizer(flat_field=flats.array[0,200,:], dark_field=darks[0,200,:])
-#norm.set_flat_field(flats.array[0,200,:])
-#norm.set_dark_field(darks.array[0,200,:])
-norm.set_input(reader.get_acquisition_data_slice(200))
-
-n = Normalizer.normalize_projection(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:], 1e-5)
-#dn_n= Normalizer.estimate_normalised_error(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:],
-# numpy.sqrt(flats.array[1,200,:]), numpy.sqrt(darks[1,200,:]))
-#%%
-
-
-#%%
-fig = plt.subplot(2,1,1)
-
-
-fig.imshow(norm.get_input().as_array())
-fig = plt.subplot(2,1,2)
-fig.imshow(n)
-
-#fig = plt.subplot(3,1,3)
-#fig.imshow(dn_n)
-
-
-plt.show()
-
-
-
-
-
-
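For reference, the correction applied by Normalizer above is the usual flat/dark normalisation; a hedged NumPy sketch of that formula (normalise is an illustrative name, the clamp mirrors the 1e-5 tolerance passed in the script, and the framework's own implementation may differ in detail):

import numpy as np

def normalise(projection, flat, dark, tolerance=1e-5):
    """(I - dark) / (flat - dark), with the denominator clamped away from zero."""
    denom = flat - dark
    denom[np.abs(denom) < tolerance] = tolerance
    return (projection - dark) / denom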
diff --git a/Wrappers/Python/wip/old_demos/demo_colourbay.py b/Wrappers/Python/wip/old_demos/demo_colourbay.py
deleted file mode 100644
index 5dbf2e1..0000000
--- a/Wrappers/Python/wip/old_demos/demo_colourbay.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# This script demonstrates how to load a mat-file with UoM colour-bay data
-# into the CIL optimisation framework and run (simple) multichannel
-# reconstruction methods.
-
-# All third-party imports.
-import numpy
-from scipy.io import loadmat
-import matplotlib.pyplot as plt
-
-# All own imports.
-from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData
-from ccpi.astra.ops import AstraProjectorMC
-from ccpi.optimisation.algs import CGLS, FISTA
-from ccpi.optimisation.funcs import Norm2sq, Norm1
-
-# Load full data and permute to expected ordering. Change path as necessary.
-# The loaded X has dims 80x60x80x150, which is pix x angle x pix x channel.
-# Permute (numpy.transpose) puts into our default ordering which is
-# (channel, angle, vertical, horizontal).
-
-pathname = '/media/jakob/050d8d45-fab3-4285-935f-260e6c5f162c1/Data/ColourBay/spectral_data_sets/CarbonPd/'
-filename = 'carbonPd_full_sinogram_stripes_removed.mat'
-
-X = loadmat(pathname + filename)
-X = numpy.transpose(X['SS'],(3,1,2,0))
-
-# Store geometric variables for reuse
-num_channels = X.shape[0]
-num_pixels_h = X.shape[3]
-num_pixels_v = X.shape[2]
-num_angles = X.shape[1]
-
-# Display a single projection in a single channel
-plt.imshow(X[100,5,:,:])
-plt.title('Example of a projection image in one channel' )
-plt.show()
-
-# Set angles to use
-angles = numpy.linspace(-numpy.pi,numpy.pi,num_angles,endpoint=False)
-
-# Define full 3D acquisition geometry and data container.
-# Geometric info is taken from the txt-file in the same dir as the mat-file
-ag = AcquisitionGeometry('cone',
- '3D',
- angles,
- pixel_num_h=num_pixels_h,
- pixel_size_h=0.25,
- pixel_num_v=num_pixels_v,
- pixel_size_v=0.25,
- dist_source_center=233.0,
- dist_center_detector=245.0,
- channels=num_channels)
-data = AcquisitionData(X, geometry=ag)
-
-# Reduce to central slice by extracting relevant parameters from data and its
-# geometry. Perhaps create function to extract central slice automatically?
-data2d = data.subset(vertical=40)
-ag2d = AcquisitionGeometry('cone',
- '2D',
- ag.angles,
- pixel_num_h=ag.pixel_num_h,
- pixel_size_h=ag.pixel_size_h,
- pixel_num_v=1,
- pixel_size_v=ag.pixel_size_h,
- dist_source_center=ag.dist_source_center,
- dist_center_detector=ag.dist_center_detector,
- channels=ag.channels)
-data2d.geometry = ag2d
-
-# Set up 2D Image Geometry.
-# First need the geometric magnification to scale the voxel size relative
-# to the detector pixel size.
-mag = (ag.dist_source_center + ag.dist_center_detector)/ag.dist_source_center
-ig2d = ImageGeometry(voxel_num_x=ag2d.pixel_num_h,
- voxel_num_y=ag2d.pixel_num_h,
- voxel_size_x=ag2d.pixel_size_h/mag,
- voxel_size_y=ag2d.pixel_size_h/mag,
- channels=X.shape[0])
-
-# Create GPU multichannel projector/backprojector operator with ASTRA.
-Aall = AstraProjectorMC(ig2d,ag2d,'gpu')
-
-# Compute a simple backprojection and display one channel as an image.
-Xbp = Aall.adjoint(data2d)
-plt.imshow(Xbp.subset(channel=100).array)
-plt.show()
-
-# Set initial guess ImageData with zeros for algorithms, and algorithm options.
-x_init = ImageData(numpy.zeros((num_channels,num_pixels_v,num_pixels_h)),
- geometry=ig2d,
- dimension_labels=['channel','horizontal_y','horizontal_x'])
-opt_CGLS = {'tol': 1e-4, 'iter': 5}
-
-# Run CGLS algorithm and display one channel.
-x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aall, data2d, opt_CGLS)
-
-plt.imshow(x_CGLS.subset(channel=100).array)
-plt.title('CGLS')
-plt.show()
-
-plt.semilogy(criter_CGLS)
-plt.title('CGLS Criterion vs iterations')
-plt.show()
-
-# Create least squares object instance with projector, test data and a constant
-# coefficient of 0.5. Note it is least squares over all channels.
-f = Norm2sq(Aall,data2d,c=0.5)
-
-# Options for FISTA algorithm.
-opt = {'tol': 1e-4, 'iter': 100}
-
-# Run FISTA for least squares without regularization and display one channel
-# reconstruction as image.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, None, opt)
-
-plt.imshow(x_fista0.subset(channel=100).array)
-plt.title('FISTA LS')
-plt.show()
-
-plt.semilogy(criter0)
-plt.title('FISTA LS Criterion vs iterations')
-plt.show()
-
-# Set up 1-norm regularisation (over all channels), solve with FISTA, and
-# display one channel of reconstruction.
-lam = 0.1
-g0 = Norm1(lam)
-
-x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g0, opt)
-
-plt.imshow(x_fista1.subset(channel=100).array)
-plt.title('FISTA LS+1')
-plt.show()
-
-plt.semilogy(criter1)
-plt.title('FISTA LS+1 Criterion vs iterations')
-plt.show() \ No newline at end of file
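The voxel size in the cone-beam script above comes from the geometric magnification; with the distances given in the geometry, the numbers work out as in this small check (a sketch reusing the script's values):

dist_source_center = 233.0
dist_center_detector = 245.0
pixel_size_h = 0.25

mag = (dist_source_center + dist_center_detector) / dist_source_center
voxel_size = pixel_size_h / mag
print(mag, voxel_size)   # roughly 2.05 and 0.122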
diff --git a/Wrappers/Python/wip/old_demos/demo_compare_cvx.py b/Wrappers/Python/wip/old_demos/demo_compare_cvx.py
deleted file mode 100644
index 27b1c97..0000000
--- a/Wrappers/Python/wip/old_demos/demo_compare_cvx.py
+++ /dev/null
@@ -1,306 +0,0 @@
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
-from ccpi.optimisation.algs import FISTA, FBPD, CGLS
-from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D, Norm2
-
-from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity
-from ccpi.optimisation.ops import Identity
-from ccpi.optimisation.ops import FiniteDiff2D
-
-# Requires CVXPY, see http://www.cvxpy.org/
-# CVXPY can be installed in anaconda using
-# conda install -c cvxgrp cvxpy libgcc
-
-# Whether to use or omit CVXPY
-use_cvxpy = True
-if use_cvxpy:
- from cvxpy import *
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-# Problem data.
-m = 30
-n = 20
-np.random.seed(1)
-Amat = np.random.randn(m, n)
-A = LinearOperatorMatrix(Amat)
-bmat = np.random.randn(m)
-bmat.shape = (bmat.shape[0],1)
-
-# A = Identity()
-# Change n to equal to m.
-
-b = DataContainer(bmat)
-
-# Regularization parameter
-lam = 10
-opt = {'memopt':True}
-# Create object instances with the test data A and b.
-f = Norm2sq(A,b,c=0.5, memopt=True)
-g0 = ZeroFun()
-
-# Initial guess
-x_init = DataContainer(np.zeros((n,1)))
-
-f.grad(x_init)
-
-# Run FISTA for least squares plus zero function.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt)
-
-# Print solution and final objective/criterion value for comparison
-print("FISTA least squares plus zero function solution and objective value:")
-print(x_fista0.array)
-print(criter0[-1])
-
-if use_cvxpy:
- # Compare to CVXPY
-
- # Construct the problem.
- x0 = Variable(n)
- objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) )
- prob0 = Problem(objective0)
-
- # The optimal objective is returned by prob.solve().
- result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus zero function solution and objective value:")
- print(x0.value)
- print(objective0.value)
-
-# Plot criterion curve to see FISTA converge to same value as CVX.
-iternum = np.arange(1,1001)
-plt.figure()
-plt.loglog(iternum[[0,-1]],[objective0.value, objective0.value], label='CVX LS')
-plt.loglog(iternum,criter0,label='FISTA LS')
-plt.legend()
-plt.show()
-
-# Create 1-norm object instance
-g1 = Norm1(lam)
-
-g1(x_init)
-x_rand = DataContainer(np.reshape(np.random.rand(n),(n,1)))
-x_rand2 = DataContainer(np.reshape(np.random.rand(n-1),(n-1,1)))
-v = g1.prox(x_rand,0.02)
-#vv = g1.prox(x_rand2,0.02)
-vv = v.copy()
-vv *= 0
-print (">>>>>>>>>>vv" , vv.as_array())
-vv.fill(v)
-print (">>>>>>>>>>fill" , vv.as_array())
-g1.proximal(x_rand, 0.02, out=vv)
-print (">>>>>>>>>>v" , v.as_array())
-print (">>>>>>>>>>gradient" , vv.as_array())
-
-print (">>>>>>>>>>" , (v-vv).as_array())
-import sys
-#sys.exit(0)
-# Combine with least squares and solve using generic FISTA implementation
-x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt)
-
-# Print for comparison
-print("FISTA least squares plus 1-norm solution and objective value:")
-print(x_fista1)
-print(criter1[-1])
-
-if use_cvxpy:
- # Compare to CVXPY
-
- # Construct the problem.
- x1 = Variable(n)
- objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) )
- prob1 = Problem(objective1)
-
- # The optimal objective is returned by prob.solve().
- result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus 1-norm solution and objective value:")
- print(x1.value)
- print(objective1.value)
-
-# Now try another algorithm FBPD for same problem:
-x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,Identity(), None, f, g1)
-print(x_fbpd1)
-print(criterfbpd1[-1])
-
-# Plot criterion curve to see both FISTA and FBPD converge to same value.
-# Note that FISTA is very efficient for 1-norm minimization so it beats
-# FBPD in this test by a lot. But FBPD can handle a larger class of problems
-# than FISTA can.
-plt.figure()
-plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
-plt.loglog(iternum,criter1,label='FISTA LS+1')
-plt.legend()
-plt.show()
-
-plt.figure()
-plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
-plt.loglog(iternum,criter1,label='FISTA LS+1')
-plt.loglog(iternum,criterfbpd1,label='FBPD LS+1')
-plt.legend()
-plt.show()
-
-# Now try 1-norm and TV denoising with FBPD, first 1-norm.
-
-# Set up phantom size NxN by creating ImageGeometry, initialising the
-# ImageData object with this geometry and empty array and finally put some
-# data into its array, and display as image.
-N = 64
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Identity operator for denoising
-I = TomoIdentity(ig)
-
-# Data and add noise
-y = I.direct(Phantom)
-y.array = y.array + 0.1*np.random.randn(N, N)
-
-plt.imshow(y.array)
-plt.title('Noisy image')
-plt.show()
-
-
-###################
-# Data fidelity term
-f_denoise = Norm2sq(I,y,c=0.5,memopt=True)
-
-# 1-norm regulariser
-lam1_denoise = 1.0
-g1_denoise = Norm1(lam1_denoise)
-
-# Initial guess
-x_init_denoise = ImageData(np.zeros((N,N)))
-
-# Combine with least squares and solve using generic FISTA implementation
-x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
-
-print(x_fista1_denoise)
-print(criter1_denoise[-1])
-
-#plt.imshow(x_fista1_denoise.as_array())
-#plt.title('FISTA LS+1')
-#plt.show()
-
-# Now denoise LS + 1-norm with FBPD
-x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, \
- criterfbpd1_denoise = FBPD(x_init_denoise, I, None, f_denoise, g1_denoise)
-print(x_fbpd1_denoise)
-print(criterfbpd1_denoise[-1])
-
-#plt.imshow(x_fbpd1_denoise.as_array())
-#plt.title('FBPD LS+1')
-#plt.show()
-
-if use_cvxpy:
- # Compare to CVXPY
-
- # Construct the problem.
- x1_denoise = Variable(N**2,1)
- objective1_denoise = Minimize(0.5*sum_squares(x1_denoise - y.array.flatten()) + lam1_denoise*norm(x1_denoise,1) )
- prob1_denoise = Problem(objective1_denoise)
-
- # The optimal objective is returned by prob.solve().
- result1_denoise = prob1_denoise.solve(verbose=False,solver=SCS,eps=1e-12)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus 1-norm solution and objective value:")
- print(x1_denoise.value)
- print(objective1_denoise.value)
-
-x1_cvx = x1_denoise.value
-x1_cvx.shape = (N,N)
-
-
-
-#plt.imshow(x1_cvx)
-#plt.title('CVX LS+1')
-#plt.show()
-
-fig = plt.figure()
-plt.subplot(1,4,1)
-plt.imshow(y.array)
-plt.title("LS+1")
-plt.subplot(1,4,2)
-plt.imshow(x_fista1_denoise.as_array())
-plt.title("fista")
-plt.subplot(1,4,3)
-plt.imshow(x_fbpd1_denoise.as_array())
-plt.title("fbpd")
-plt.subplot(1,4,4)
-plt.imshow(x1_cvx)
-plt.title("cvx")
-plt.show()
-
-##############################################################
-# Now TV with FBPD and Norm2
-lam_tv = 0.1
-gtv = TV2D(lam_tv)
-norm2 = Norm2(lam_tv)
-op = FiniteDiff2D()
-#gtv(gtv.op.direct(x_init_denoise))
-
-opt_tv = {'tol': 1e-4, 'iter': 10000}
-
-x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, \
- criterfbpdtv_denoise = FBPD(x_init_denoise, op, None, \
- f_denoise, norm2 ,opt=opt_tv)
-print(x_fbpdtv_denoise)
-print(criterfbpdtv_denoise[-1])
-
-plt.imshow(x_fbpdtv_denoise.as_array())
-plt.title('FBPD TV')
-#plt.show()
-
-if use_cvxpy:
- # Compare to CVXPY
-
- # Construct the problem.
- xtv_denoise = Variable((N,N))
- #print (xtv_denoise.value.shape)
- objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) )
- probtv_denoise = Problem(objectivetv_denoise)
-
- # The optimal objective is returned by prob.solve().
- resulttv_denoise = probtv_denoise.solve(verbose=False,solver=SCS,eps=1e-12)
-
- # The optimal solution for x is stored in x.value and optimal objective value
- # is in result as well as in objective.value
- print("CVXPY least squares plus 1-norm solution and objective value:")
- print(xtv_denoise.value)
- print(objectivetv_denoise.value)
-
-plt.imshow(xtv_denoise.value)
-plt.title('CVX TV')
-#plt.show()
-
-fig = plt.figure()
-plt.subplot(1,3,1)
-plt.imshow(y.array)
-plt.title("TV2D")
-plt.subplot(1,3,2)
-plt.imshow(x_fbpdtv_denoise.as_array())
-plt.title("fbpd tv denoise")
-plt.subplot(1,3,3)
-plt.imshow(xtv_denoise.value)
-plt.title("CVX tv")
-plt.show()
-
-
-
-plt.loglog([0,opt_tv['iter']], [objectivetv_denoise.value,objectivetv_denoise.value], label='CVX TV')
-plt.loglog(criterfbpdtv_denoise, label='FBPD TV')
-plt.legend()
-plt.show()
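A direct way to confirm that FISTA, FBPD and CVXPY land on the same minimiser of the problems above is to evaluate the shared objective on each solution; a hedged NumPy sketch (lasso_objective is an illustrative helper, with A, b and lam as in the script):

import numpy as np

def lasso_objective(A, b, x, lam):
    """0.5 * ||A x - b||_2^2 + lam * ||x||_1, the criterion plotted above."""
    residual = A @ x - b
    return 0.5 * residual.dot(residual) + lam * np.abs(x).sum()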
diff --git a/Wrappers/Python/wip/old_demos/demo_gradient_descent.py b/Wrappers/Python/wip/old_demos/demo_gradient_descent.py
deleted file mode 100755
index 4d6647e..0000000
--- a/Wrappers/Python/wip/old_demos/demo_gradient_descent.py
+++ /dev/null
@@ -1,295 +0,0 @@
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
-from ccpi.optimisation.algs import FISTA, FBPD, CGLS
-from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D, Norm2
-
-from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity
-from ccpi.optimisation.ops import Identity
-from ccpi.optimisation.ops import FiniteDiff2D
-
-# Requires CVXPY, see http://www.cvxpy.org/
-# CVXPY can be installed in anaconda using
-# conda install -c cvxgrp cvxpy libgcc
-
-# Whether to use or omit CVXPY
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-class Algorithm(object):
- def __init__(self, *args, **kwargs):
- pass
- def set_up(self, *args, **kwargs):
- raise NotImplementedError()
- def update(self):
- raise NotImplementedError()
-
- def should_stop(self):
- raise NotImplementedError()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self.should_stop():
- raise StopIteration()
- else:
- self.update()
-
-class GradientDescent(Algorithm):
- x = None
- rate = 0
- objective_function = None
- regulariser = None
- iteration = 0
-    stop_criterion = 'max_iter'
- __max_iteration = 0
- __loss = []
- def __init__(self, **kwargs):
- args = ['x_init', 'objective_function', 'rate']
- present = True
- for k,v in kwargs.items():
- if k in args:
- args.pop(args.index(k))
- if len(args) == 0:
- return self.set_up(x_init=kwargs['x_init'],
- objective_function=kwargs['objective_function'],
- rate=kwargs['rate'])
-
- def should_stop(self):
- return self.iteration >= self.max_iteration
-
- def set_up(self, x_init, objective_function, rate):
- self.x = x_init.copy()
- self.x_update = x_init.copy()
- self.objective_function = objective_function
- self.rate = rate
- self.__loss.append(objective_function(x_init))
-
- def update(self):
-
- self.objective_function.gradient(self.x, out=self.x_update)
- self.x_update *= -self.rate
- self.x += self.x_update
- self.__loss.append(self.objective_function(self.x))
- self.iteration += 1
-
- def get_output(self):
- return self.x
- def get_current_loss(self):
- return self.__loss[-1]
- @property
- def loss(self):
- return self.__loss
- @property
- def max_iteration(self):
- return self.__max_iteration
- @max_iteration.setter
- def max_iteration(self, value):
- assert isinstance(value, int)
- self.__max_iteration = value
-
-
-
-
-
-# Problem data.
-m = 30
-n = 20
-np.random.seed(1)
-Amat = np.random.randn(m, n)
-A = LinearOperatorMatrix(Amat)
-bmat = np.random.randn(m)
-bmat.shape = (bmat.shape[0],1)
-
-# A = Identity()
-# Change n to equal to m.
-
-b = DataContainer(bmat)
-
-# Regularization parameter
-lam = 10
-opt = {'memopt':True}
-# Create object instances with the test data A and b.
-f = Norm2sq(A,b,c=0.5, memopt=True)
-g0 = ZeroFun()
-
-# Initial guess
-x_init = DataContainer(np.zeros((n,1)))
-
-f.grad(x_init)
-
-# Run FISTA for least squares plus zero function.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt)
-
-# Print solution and final objective/criterion value for comparison
-print("FISTA least squares plus zero function solution and objective value:")
-print(x_fista0.array)
-print(criter0[-1])
-
-gd = GradientDescent(x_init=x_init, objective_function=f, rate=0.001)
-gd.max_iteration = 5000
-
-for i,el in enumerate(gd):
- if i%100 == 0:
- print ("\rIteration {} Loss: {}".format(gd.iteration,
- gd.get_current_loss()))
-
-
-#%%
-
-
-#
-#if use_cvxpy:
-# # Compare to CVXPY
-#
-# # Construct the problem.
-# x0 = Variable(n)
-# objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) )
-# prob0 = Problem(objective0)
-#
-# # The optimal objective is returned by prob.solve().
-# result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9)
-#
-# # The optimal solution for x is stored in x.value and optimal objective value
-# # is in result as well as in objective.value
-# print("CVXPY least squares plus zero function solution and objective value:")
-# print(x0.value)
-# print(objective0.value)
-#
-## Plot criterion curve to see FISTA converge to same value as CVX.
-#iternum = np.arange(1,1001)
-#plt.figure()
-#plt.loglog(iternum[[0,-1]],[objective0.value, objective0.value], label='CVX LS')
-#plt.loglog(iternum,criter0,label='FISTA LS')
-#plt.legend()
-#plt.show()
-#
-## Create 1-norm object instance
-#g1 = Norm1(lam)
-#
-#g1(x_init)
-#x_rand = DataContainer(np.reshape(np.random.rand(n),(n,1)))
-#x_rand2 = DataContainer(np.reshape(np.random.rand(n-1),(n-1,1)))
-#v = g1.prox(x_rand,0.02)
-##vv = g1.prox(x_rand2,0.02)
-#vv = v.copy()
-#vv *= 0
-#print (">>>>>>>>>>vv" , vv.as_array())
-#vv.fill(v)
-#print (">>>>>>>>>>fill" , vv.as_array())
-#g1.proximal(x_rand, 0.02, out=vv)
-#print (">>>>>>>>>>v" , v.as_array())
-#print (">>>>>>>>>>gradient" , vv.as_array())
-#
-#print (">>>>>>>>>>" , (v-vv).as_array())
-#import sys
-##sys.exit(0)
-## Combine with least squares and solve using generic FISTA implementation
-#x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt)
-#
-## Print for comparison
-#print("FISTA least squares plus 1-norm solution and objective value:")
-#print(x_fista1)
-#print(criter1[-1])
-#
-#if use_cvxpy:
-# # Compare to CVXPY
-#
-# # Construct the problem.
-# x1 = Variable(n)
-# objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) )
-# prob1 = Problem(objective1)
-#
-# # The optimal objective is returned by prob.solve().
-# result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9)
-#
-# # The optimal solution for x is stored in x.value and optimal objective value
-# # is in result as well as in objective.value
-# print("CVXPY least squares plus 1-norm solution and objective value:")
-# print(x1.value)
-# print(objective1.value)
-#
-## Now try another algorithm FBPD for same problem:
-#x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,Identity(), None, f, g1)
-#print(x_fbpd1)
-#print(criterfbpd1[-1])
-#
-## Plot criterion curve to see both FISTA and FBPD converge to same value.
-## Note that FISTA is very efficient for 1-norm minimization so it beats
-## FBPD in this test by a lot. But FBPD can handle a larger class of problems
-## than FISTA can.
-#plt.figure()
-#plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
-#plt.loglog(iternum,criter1,label='FISTA LS+1')
-#plt.legend()
-#plt.show()
-#
-#plt.figure()
-#plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
-#plt.loglog(iternum,criter1,label='FISTA LS+1')
-#plt.loglog(iternum,criterfbpd1,label='FBPD LS+1')
-#plt.legend()
-#plt.show()
-
-# Now try 1-norm and TV denoising with FBPD, first 1-norm.
-
-# Set up phantom size NxN by creating ImageGeometry, initialising the
-# ImageData object with this geometry and empty array and finally put some
-# data into its array, and display as image.
-N = 64
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Identity operator for denoising
-I = TomoIdentity(ig)
-
-# Data and add noise
-y = I.direct(Phantom)
-y.array = y.array + 0.1*np.random.randn(N, N)
-
-plt.imshow(y.array)
-plt.title('Noisy image')
-plt.show()
-
-
-###################
-# Data fidelity term
-f_denoise = Norm2sq(I,y,c=0.5,memopt=True)
-
-# 1-norm regulariser
-lam1_denoise = 1.0
-g1_denoise = Norm1(lam1_denoise)
-
-# Initial guess
-x_init_denoise = ImageData(np.zeros((N,N)))
-
-# Combine with least squares and solve using generic FISTA implementation
-x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = \
- FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
-
-print(x_fista1_denoise)
-print(criter1_denoise[-1])
-
-gd = GradientDescent(x_init=x_init_denoise,
-                     objective_function=f_denoise, rate=0.001)
-gd.max_iteration = 5000
-
-for i,el in enumerate(gd):
- if i%100 == 0:
- print ("\rIteration {} Loss: {}".format(gd.iteration,
- gd.get_current_loss()))
-
-plt.imshow(gd.get_output().as_array())
-plt.title('GD image')
-plt.show()
-
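The fixed rate of 0.001 used above is ad hoc; for least squares the safe step is 1/L, where L is the Lipschitz constant of the gradient, i.e. the largest squared singular value of A. A small sketch of that choice (safe_rate is illustrative, not taken from the script):

import numpy as np

def safe_rate(Amat):
    """Step size 1/L for f(x) = 0.5 * ||A x - b||^2, with L = sigma_max(A)**2."""
    L = np.linalg.norm(Amat, 2) ** 2   # spectral norm, squared
    return 1.0 / L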
diff --git a/Wrappers/Python/wip/old_demos/demo_imat_multichan_RGLTK.py b/Wrappers/Python/wip/old_demos/demo_imat_multichan_RGLTK.py
deleted file mode 100644
index 8370c78..0000000
--- a/Wrappers/Python/wip/old_demos/demo_imat_multichan_RGLTK.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# This script demonstrates how to load IMAT fits data
-# into the CIL optimisation framework and run reconstruction methods.
-#
-# Demo to reconstruct energy-discretized channels of IMAT data
-
-# needs dxchange: conda install -c conda-forge dxchange
-# needs astropy: conda install astropy
-
-
-# All third-party imports.
-import numpy as np
-import matplotlib.pyplot as plt
-from dxchange.reader import read_fits
-from astropy.io import fits
-
-# All own imports.
-from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData, DataContainer
-from ccpi.astra.ops import AstraProjectorSimple, AstraProjector3DSimple
-from ccpi.optimisation.algs import CGLS, FISTA
-from ccpi.optimisation.funcs import Norm2sq, Norm1
-from ccpi.plugins.regularisers import FGP_TV
-
-# set main parameters here
-n = 512
-totalAngles = 250 # total number of projection angles
-# spectral discretization parameter
-num_average = 145 # channel discretization frequency (total number of averaged channels)
-numChannels = 2970 # 2970
-totChannels = round(numChannels/num_average) # the resulting number of channels
-Projections_stack = np.zeros((num_average,n,n),dtype='uint16')
-ProjAngleChannels = np.zeros((totalAngles,totChannels,n,n),dtype='float32')
-
-#########################################################################
-print ("Loading the data...")
-MainPath = '/media/jakob/050d8d45-fab3-4285-935f-260e6c5f162c1/Data/neutrondata/' # path to data
-pathname0 = '{!s}{!s}'.format(MainPath,'PSI_phantom_IMAT/DATA/Sample/')
-counterFileName = 4675
-# A main loop over all available angles
-for ll in range(0,totalAngles,1):
- pathnameData = '{!s}{!s}{!s}/'.format(pathname0,'angle',str(ll))
- filenameCurr = '{!s}{!s}{!s}'.format('IMAT0000',str(counterFileName),'_Tomo_test_000_')
- counterT = 0
- # loop over reduced channels (discretized)
- for i in range(0,totChannels,1):
- sumCount = 0
- # loop over actual channels to obtain averaged one
- for j in range(0,num_average,1):
- if counterT < 10:
- outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'0000',str(counterT))
- if ((counterT >= 10) & (counterT < 100)):
- outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'000',str(counterT))
- if ((counterT >= 100) & (counterT < 1000)):
- outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'00',str(counterT))
- if ((counterT >= 1000) & (counterT < 10000)):
- outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'0',str(counterT))
- try:
- Projections_stack[j,:,:] = read_fits(outfile)
-            except Exception:
-                print("FITS file is corrupted, skipping no.", counterT)
- sumCount -= 1
- counterT += 1
- sumCount += 1
- AverageProj=np.sum(Projections_stack,axis=0)/sumCount # averaged projection over "num_average" channels
- ProjAngleChannels[ll,i,:,:] = AverageProj
- print("Angle is processed", ll)
- counterFileName += 1
-#########################################################################
-
-flat1 = read_fits('{!s}{!s}{!s}'.format(MainPath,'PSI_phantom_IMAT/DATA/','OpenBeam_aft1/IMAT00004932_Tomo_test_000_SummedImg.fits'))
-nonzero = flat1 > 0
-# Apply flat field and take negative log
-for ll in range(0,totalAngles,1):
- for i in range(0,totChannels,1):
- ProjAngleChannels[ll,i,nonzero] = ProjAngleChannels[ll,i,nonzero]/flat1[nonzero]
-
-eqzero = ProjAngleChannels == 0
-ProjAngleChannels[eqzero] = 1
-ProjAngleChannels_NormLog = -np.log(ProjAngleChannels) # normalised and neg-log data
-
-# extract the sinogram over the energy channels
-selectedVertical_slice = 256
-sino_all_channels = ProjAngleChannels_NormLog[:,:,:,selectedVertical_slice]
-# Set angles to use
-angles = np.linspace(-np.pi,np.pi,totalAngles,endpoint=False)
-
-# set the geometry
-ig = ImageGeometry(n,n)
-ag = AcquisitionGeometry('parallel',
- '2D',
- angles,
- n,1)
-Aop = AstraProjectorSimple(ig, ag, 'gpu')
-
-
-# loop to reconstruct energy channels
-REC_chan = np.zeros((totChannels, n, n), 'float32')
-for i in range(0,totChannels,1):
- sino_channel = sino_all_channels[:,i,:] # extract a sinogram for i-th channel
-
- print ("Initial guess")
- x_init = ImageData(geometry=ig)
-
- # Create least squares object instance with projector and data.
- print ("Create least squares object instance with projector and data.")
- f = Norm2sq(Aop,DataContainer(sino_channel),c=0.5)
-
- print ("Run FISTA-TV for least squares")
- lamtv = 5
- opt = {'tol': 1e-4, 'iter': 200}
- g_fgp = FGP_TV(lambdaReg = lamtv,
- iterationsTV=50,
- tolerance=1e-6,
- methodTV=0,
- nonnegativity=0,
- printing=0,
- device='gpu')
-
- x_fista_fgp, it1, timing1, criter_fgp = FISTA(x_init, f, g_fgp, opt)
- REC_chan[i,:,:] = x_fista_fgp.array
- """
- plt.figure()
- plt.subplot(121)
- plt.imshow(x_fista_fgp.array, vmin=0, vmax=0.05)
- plt.title('FISTA FGP TV')
- plt.subplot(122)
- plt.semilogy(criter_fgp)
- plt.show()
- """
- """
- print ("Run CGLS for least squares")
- opt = {'tol': 1e-4, 'iter': 20}
- x_init = ImageData(geometry=ig)
- x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, DataContainer(sino_channel), opt=opt)
-
- plt.figure()
- plt.imshow(x_CGLS.array,vmin=0, vmax=0.05)
- plt.title('CGLS')
- plt.show()
- """
-# Save the reconstructed slices to FITS files using astropy, if required
-counter = 0
-filename = 'FISTA_TV_imat_slice'
-for i in range(totChannels):
- im = REC_chan[i,:,:]
- add_val = np.min(im[:])
- im += abs(add_val)
- im = im/np.max(im[:])*65535
- outfile = '{!s}_{!s}_{!s}.fits'.format(filename,str(selectedVertical_slice),str(counter))
- hdu = fits.PrimaryHDU(np.uint16(im))
- hdu.writeto(outfile, overwrite=True)
- counter += 1 \ No newline at end of file
diff --git a/Wrappers/Python/wip/old_demos/demo_imat_whitebeam.py b/Wrappers/Python/wip/old_demos/demo_imat_whitebeam.py
deleted file mode 100644
index e0d213e..0000000
--- a/Wrappers/Python/wip/old_demos/demo_imat_whitebeam.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# This script demonstrates how to load IMAT fits data
-# into the CIL optimisation framework and run reconstruction methods.
-#
-# This demo loads the summedImg files which are the non-spectral images
-# resulting from summing projections over all spectral channels.
-
-# needs dxchange: conda install -c conda-forge dxchange
-# needs astropy: conda install astropy
-
-
-# All third-party imports.
-import numpy
-from scipy.io import loadmat
-import matplotlib.pyplot as plt
-from dxchange.reader import read_fits
-
-# All own imports.
-from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData
-from ccpi.astra.ops import AstraProjectorSimple, AstraProjector3DSimple
-from ccpi.optimisation.algs import CGLS, FISTA
-from ccpi.optimisation.funcs import Norm2sq, Norm1
-
-# Load and display a couple of summed projections as examples
-pathname0 = '/media/newhd/shared/Data/neutrondata/PSI_phantom_IMAT/DATA/Sample/angle0/'
-filename0 = 'IMAT00004675_Tomo_test_000_SummedImg.fits'
-
-data0 = read_fits(pathname0 + filename0)
-
-pathname10 = '/media/newhd/shared/Data/neutrondata/PSI_phantom_IMAT/DATA/Sample/angle10/'
-filename10 = 'IMAT00004685_Tomo_test_000_SummedImg.fits'
-
-data10 = read_fits(pathname10 + filename10)
-
-# Load a flat field (more are available, should we average over them?)
-flat1 = read_fits('/media/newhd/shared/Data/neutrondata/PSI_phantom_IMAT/DATA/OpenBeam_aft1/IMAT00004932_Tomo_test_000_SummedImg.fits')
-
-# Apply flat field and display after flat-field correction and negative log
-data0_rel = numpy.zeros(numpy.shape(flat1), dtype = float)
-nonzero = flat1 > 0
-data0_rel[nonzero] = data0[nonzero] / flat1[nonzero]
-data10_rel = numpy.zeros(numpy.shape(flat1), dtype = float)
-data10_rel[nonzero] = data10[nonzero] / flat1[nonzero]
-
-plt.imshow(data0_rel)
-plt.colorbar()
-plt.show()
-
-plt.imshow(-numpy.log(data0_rel))
-plt.colorbar()
-plt.show()
-
-plt.imshow(data10_rel)
-plt.colorbar()
-plt.show()
-
-plt.imshow(-numpy.log(data10_rel))
-plt.colorbar()
-plt.show()
-
-# Set up for loading all summed images at 250 angles.
-pathname = '/media/newhd/shared/Data/neutrondata/PSI_phantom_IMAT/DATA/Sample/angle{}/'
-filename = 'IMAT0000{}_Tomo_test_000_SummedImg.fits'
-
-# Dimensions
-num_angles = 250
-imsize = 512
-
-# Initialise array
-data = numpy.zeros((num_angles,imsize,imsize))
-
-# Load only 0-249, as image 250 is a repetition of zero degrees, just like image 0
-for i in range(0,250):
- curimfile = (pathname + filename).format(i, i+4675)
- data[i,:,:] = read_fits(curimfile)
-
-# Apply flat field and take negative log
-nonzero = flat1 > 0
-for i in range(0,250):
- data[i,nonzero] = data[i,nonzero]/flat1[nonzero]
-
-eqzero = data == 0
-data[eqzero] = 1
-
-data_rel = -numpy.log(data)
-
-# Permute order to get: angles, vertical, horizontal, as default in framework.
-data_rel = numpy.transpose(data_rel,(0,2,1))
-
-# Set angles to use
-angles = numpy.linspace(-numpy.pi,numpy.pi,num_angles,endpoint=False)
-
-# Create 3D acquisition geometry and acquisition data
-ag = AcquisitionGeometry('parallel',
- '3D',
- angles,
- pixel_num_h=imsize,
- pixel_num_v=imsize)
-b = AcquisitionData(data_rel, geometry=ag)
-
-# Reduce to single (noncentral) slice by extracting relevant parameters from data and its
-# geometry. Perhaps create function to extract central slice automatically?
-b2d = b.subset(vertical=128)
-ag2d = AcquisitionGeometry('parallel',
- '2D',
- ag.angles,
- pixel_num_h=ag.pixel_num_h)
-b2d.geometry = ag2d
-
-# Create 2D image geometry
-ig2d = ImageGeometry(voxel_num_x=ag2d.pixel_num_h,
- voxel_num_y=ag2d.pixel_num_h)
-
-# Create GPU projector/backprojector operator with ASTRA.
-Aop = AstraProjectorSimple(ig2d,ag2d,'gpu')
-
-# Demonstrate that the operator is working by applying a simple backprojection.
-z = Aop.adjoint(b2d)
-plt.imshow(z.array)
-plt.title('Simple backprojection')
-plt.colorbar()
-plt.show()
-
-# Set up an initial guess ImageData filled with zeros, and the algorithm options.
-x_init = ImageData(numpy.zeros((imsize,imsize)),
- geometry=ig2d)
-opt_CGLS = {'tol': 1e-4, 'iter': 20}
-
-# Run CGLS algorithm and display reconstruction.
-x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, b2d, opt_CGLS)
-
-plt.imshow(x_CGLS.array)
-plt.title('CGLS')
-plt.colorbar()
-plt.show()
-
-plt.semilogy(criter_CGLS)
-plt.title('CGLS Criterion vs iterations')
-plt.show()
\ No newline at end of file
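For reference, the flat-field correction and negative log used in the deleted IMAT demo above amount to dividing each projection by the open-beam (flat) image and taking -log of the ratio. A minimal numpy-only sketch of that step (function and variable names are illustrative, not part of the framework):

import numpy

def neglog_normalise(projection, flat, eps=1e-6):
    # Divide by the flat field where it is positive, then take -log,
    # guarding against zeros so the logarithm stays finite.
    corrected = numpy.ones_like(projection, dtype=float)
    nonzero = flat > 0
    corrected[nonzero] = projection[nonzero] / flat[nonzero]
    corrected[corrected <= 0] = eps
    return -numpy.log(corrected)

# toy check: a projection at half the open-beam intensity gives ln(2) ~ 0.693
proj = numpy.full((4, 4), 50.0)
flat = numpy.full((4, 4), 100.0)
print(neglog_normalise(proj, flat))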
diff --git a/Wrappers/Python/wip/old_demos/demo_memhandle.py b/Wrappers/Python/wip/old_demos/demo_memhandle.py
deleted file mode 100755
index db48d73..0000000
--- a/Wrappers/Python/wip/old_demos/demo_memhandle.py
+++ /dev/null
@@ -1,193 +0,0 @@
-
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
-from ccpi.optimisation.algs import FISTA, FBPD, CGLS
-from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D
-
-from ccpi.optimisation.ops import LinearOperatorMatrix, Identity
-from ccpi.optimisation.ops import TomoIdentity
-
-# Requires CVXPY, see http://www.cvxpy.org/
-# CVXPY can be installed in anaconda using
-# conda install -c cvxgrp cvxpy libgcc
-
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-# Problem data.
-m = 30
-n = 20
-np.random.seed(1)
-Amat = np.random.randn(m, n)
-A = LinearOperatorMatrix(Amat)
-bmat = np.random.randn(m)
-bmat.shape = (bmat.shape[0],1)
-
-
-
-# A = Identity()
-# (If using the Identity operator instead, change n to equal m.)
-
-b = DataContainer(bmat)
-
-# Regularization parameter
-lam = 10
-
-# Create object instances with the test data A and b.
-f = Norm2sq(A,b,c=0.5, memopt=True)
-g0 = ZeroFun()
-
-# Initial guess
-x_init = DataContainer(np.zeros((n,1)))
-
-f.grad(x_init)
-opt = {'memopt': True}
-# Run FISTA for least squares plus zero function.
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0)
-x_fista0_m, it0_m, timing0_m, criter0_m = FISTA(x_init, f, g0, opt=opt)
-
-iternum = [i for i in range(len(criter0))]
-# Print solution and final objective/criterion value for comparison
-print("FISTA least squares plus zero function solution and objective value:")
-print(x_fista0.array)
-print(criter0[-1])
-
-
-# Plot criterion curves to check that FISTA with and without memopt converge to the same value.
-#iternum = np.arange(1,1001)
-plt.figure()
-plt.loglog(iternum,criter0,label='FISTA LS')
-plt.loglog(iternum,criter0_m,label='FISTA LS memopt')
-plt.legend()
-plt.show()
-#%%
-# Create 1-norm object instance
-g1 = Norm1(lam)
-
-g1(x_init)
-g1.prox(x_init,0.02)
-
-# Combine with least squares and solve using generic FISTA implementation
-x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1)
-x_fista1_m, it1_m, timing1_m, criter1_m = FISTA(x_init, f, g1, opt=opt)
-iternum = [i for i in range(len(criter1))]
-# Print for comparison
-print("FISTA least squares plus 1-norm solution and objective value:")
-print(x_fista1)
-print(criter1[-1])
-
-
-# Now try another algorithm, FBPD, for the same problem:
-x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1)
-iternum = [i for i in range(len(criterfbpd1))]
-print(x_fbpd1)
-print(criterfbpd1[-1])
-
-# Plot criterion curve to see both FISTA and FBPD converge to same value.
-# Note that FISTA is very efficient for 1-norm minimization so it beats
-# FBPD in this test by a lot. But FBPD can handle a larger class of problems
-# than FISTA can.
-plt.figure()
-plt.loglog(iternum,criter1,label='FISTA LS+1')
-plt.loglog(iternum,criter1_m,label='FISTA LS+1 memopt')
-plt.legend()
-plt.show()
-
-plt.figure()
-plt.loglog(iternum,criter1,label='FISTA LS+1')
-plt.loglog(iternum,criterfbpd1,label='FBPD LS+1')
-plt.legend()
-plt.show()
-#%%
-# Now try 1-norm and TV denoising with FBPD, first 1-norm.
-
-# Set up a phantom of size NxN by creating an ImageGeometry, initialising an
-# ImageData object with this geometry and an empty array, then putting some
-# data into the array and displaying it as an image.
-N = 1000
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Identity operator for denoising
-I = TomoIdentity(ig)
-
-# Generate data and add noise
-y = I.direct(Phantom)
-y.array += 0.1*np.random.randn(N, N)
-
-plt.figure()
-plt.imshow(y.array)
-plt.title('Noisy image')
-plt.show()
-
-# Data fidelity term
-f_denoise = Norm2sq(I,y,c=0.5, memopt=True)
-
-# 1-norm regulariser
-lam1_denoise = 1.0
-g1_denoise = Norm1(lam1_denoise)
-
-# Initial guess
-x_init_denoise = ImageData(np.zeros((N,N)))
-opt = {'memopt': False, 'iter' : 50}
-# Combine with least squares and solve using generic FISTA implementation
-print ("no memopt")
-x_fista1_denoise, it1_denoise, timing1_denoise, \
- criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
-opt = {'memopt': True, 'iter' : 50}
-print ("yes memopt")
-x_fista1_denoise_m, it1_denoise_m, timing1_denoise_m, \
- criter1_denoise_m = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
-
-print(x_fista1_denoise)
-print(criter1_denoise[-1])
-
-plt.figure()
-plt.imshow(x_fista1_denoise.as_array())
-plt.title('FISTA LS+1')
-plt.show()
-
-plt.figure()
-plt.imshow(x_fista1_denoise_m.as_array())
-plt.title('FISTA LS+1 memopt')
-plt.show()
-
-plt.figure()
-plt.loglog(range(len(criter1_denoise)),criter1_denoise,label='FISTA LS+1')
-plt.loglog(range(len(criter1_denoise_m)),criter1_denoise_m,label='FISTA LS+1 memopt')
-plt.legend()
-plt.show()
-#%%
-# Now denoise LS + 1-norm with FBPD
-x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise)
-print(x_fbpd1_denoise)
-print(criterfbpd1_denoise[-1])
-
-plt.figure()
-plt.imshow(x_fbpd1_denoise.as_array())
-plt.title('FBPD LS+1')
-plt.show()
-
-
-# Now TV with FBPD
-lam_tv = 0.1
-gtv = TV2D(lam_tv)
-gtv(gtv.op.direct(x_init_denoise))
-
-opt_tv = {'tol': 1e-4, 'iter': 10000}
-
-x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv)
-print(x_fbpdtv_denoise)
-print(criterfbpdtv_denoise[-1])
-
-plt.imshow(x_fbpdtv_denoise.as_array())
-plt.title('FBPD TV')
-plt.show()
diff --git a/Wrappers/Python/wip/old_demos/demo_test_sirt.py b/Wrappers/Python/wip/old_demos/demo_test_sirt.py
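For context on the deleted memhandle demo above: the FISTA runs with Norm1 rely on the proximal operator of a scaled 1-norm, which, for the standard 1-norm, is elementwise soft thresholding. A standalone numpy sketch, independent of the ccpi classes (the function name is illustrative):

import numpy as np

def soft_threshold(x, tau):
    # prox of tau*||.||_1: shrink every entry towards zero by tau
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

x = np.array([-2.0, -0.3, 0.0, 0.5, 3.0])
print(soft_threshold(x, 1.0))   # -> [-1. -0.  0.  0.  2.]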
deleted file mode 100644
index 6f5a44d..0000000
--- a/Wrappers/Python/wip/old_demos/demo_test_sirt.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# This demo illustrates how to use the SIRT algorithm without and with
-# nonnegativity and box constraints. The ASTRA 2D projectors are used.
-
-# First make all imports
-from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, \
- AcquisitionData
-from ccpi.optimisation.algs import FISTA, FBPD, CGLS, SIRT
-from ccpi.optimisation.funcs import Norm2sq, Norm1, TV2D, IndicatorBox
-from ccpi.astra.ops import AstraProjectorSimple
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-# Choose either a parallel-beam (1=parallel2D) or fan-beam (2=cone2D) test case
-test_case = 1
-
-# Set up a phantom of size NxN by creating an ImageGeometry, initialising an
-# ImageData object with this geometry and an empty array, then putting some
-# data into the array and displaying it as an image.
-N = 128
-ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
-Phantom = ImageData(geometry=ig)
-
-x = Phantom.as_array()
-x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-plt.imshow(x)
-plt.title('Phantom image')
-plt.show()
-
-# Set up an AcquisitionGeometry object to hold the parameters of the measurement
-# setup: the number of angles, the actual angles (from 0 to pi for parallel
-# beam and 0 to 2*pi for fan beam), the width of a detector pixel relative to
-# an object pixel, the number of detector pixels, and the source-origin and
-# origin-detector distances (here the origin-detector distance is set to 0 to
-# simulate a "virtual detector" with the same detector pixel size as the
-# object pixel size).
-angles_num = 20
-det_w = 1.0
-det_num = N
-SourceOrig = 200
-OrigDetec = 0
-
-if test_case==1:
- angles = np.linspace(0,np.pi,angles_num,endpoint=False)
- ag = AcquisitionGeometry('parallel',
- '2D',
- angles,
- det_num,det_w)
-elif test_case==2:
- angles = np.linspace(0,2*np.pi,angles_num,endpoint=False)
- ag = AcquisitionGeometry('cone',
- '2D',
- angles,
- det_num,
- det_w,
- dist_source_center=SourceOrig,
- dist_center_detector=OrigDetec)
-else:
- raise NotImplementedError
-
-# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
-# wrapping calls to ASTRA as well as specifying whether to use CPU or GPU.
-Aop = AstraProjectorSimple(ig, ag, 'gpu')
-
-# Forward and backprojection are available as methods direct and adjoint. Here
-# generate test data b and do simple backprojection to obtain z.
-b = Aop.direct(Phantom)
-z = Aop.adjoint(b)
-
-plt.imshow(b.array)
-plt.title('Simulated data')
-plt.show()
-
-plt.imshow(z.array)
-plt.title('Backprojected data')
-plt.show()
-
-# Using the test data b, different reconstruction methods can now be set up as
-# demonstrated in the rest of this file. In general all methods need an initial
-# guess and some algorithm options to be set:
-x_init = ImageData(np.zeros(x.shape),geometry=ig)
-opt = {'tol': 1e-4, 'iter': 1000}
-
-# First a CGLS reconstruction can be done:
-x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, b, opt)
-
-plt.imshow(x_CGLS.array)
-plt.title('CGLS')
-plt.colorbar()
-plt.show()
-
-plt.semilogy(criter_CGLS)
-plt.title('CGLS criterion')
-plt.show()
-
-# A SIRT unconstrained reconstruction can be done similarly:
-x_SIRT, it_SIRT, timing_SIRT, criter_SIRT = SIRT(x_init, Aop, b, opt)
-
-plt.imshow(x_SIRT.array)
-plt.title('SIRT unconstrained')
-plt.colorbar()
-plt.show()
-
-plt.semilogy(criter_SIRT)
-plt.title('SIRT unconstrained criterion')
-plt.show()
-
-# A SIRT nonnegativity constrained reconstruction can be done using the
-# additional input "constraint" set to a box indicator function with 0 as the
-# lower bound and the default upper bound of infinity:
-x_SIRT0, it_SIRT0, timing_SIRT0, criter_SIRT0 = SIRT(x_init, Aop, b, opt,
- constraint=IndicatorBox(lower=0))
-
-plt.imshow(x_SIRT0.array)
-plt.title('SIRT nonneg')
-plt.colorbar()
-plt.show()
-
-plt.semilogy(criter_SIRT0)
-plt.title('SIRT nonneg criterion')
-plt.show()
-
-# A SIRT reconstruction with box constraints on [0,1] can also be done:
-x_SIRT01, it_SIRT01, timing_SIRT01, criter_SIRT01 = SIRT(x_init, Aop, b, opt,
- constraint=IndicatorBox(lower=0,upper=1))
-
-plt.imshow(x_SIRT01.array)
-plt.title('SIRT box(0,1)')
-plt.colorbar()
-plt.show()
-
-plt.semilogy(criter_SIRT01)
-plt.title('SIRT box(0,1) criterion')
-plt.show()
-
-# The indicator function can also be used with the FISTA algorithm to do
-# least squares with a nonnegativity constraint.
-
-# Create least squares object instance with projector, test data and a constant
-# coefficient of 0.5:
-f = Norm2sq(Aop,b,c=0.5)
-
-# Run FISTA for least squares without constraints
-x_fista, it, timing, criter = FISTA(x_init, f, None,opt)
-
-plt.imshow(x_fista.array)
-plt.title('FISTA Least squares')
-plt.show()
-
-plt.semilogy(criter)
-plt.title('FISTA Least squares criterion')
-plt.show()
-
-# Run FISTA for least squares with nonnegativity constraint
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, IndicatorBox(lower=0),opt)
-
-plt.imshow(x_fista0.array)
-plt.title('FISTA Least squares nonneg')
-plt.show()
-
-plt.semilogy(criter0)
-plt.title('FISTA Least squares nonneg criterion')
-plt.show()
-
-# Run FISTA for least squares with box constraint [0,1]
-x_fista01, it01, timing01, criter01 = FISTA(x_init, f, IndicatorBox(lower=0,upper=1),opt)
-
-plt.imshow(x_fista01.array)
-plt.title('FISTA Least squares box(0,1)')
-plt.show()
-
-plt.semilogy(criter01)
-plt.title('FISTA Least squares box(0,1) criterion')
-plt.show()
\ No newline at end of file
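The deleted SIRT demo above applies box constraints via an indicator function; a common way to realise this is to project onto the box after each update. As a rough, framework-independent illustration of that idea (a dense-matrix toy, not the ccpi/ASTRA implementation):

import numpy as np

def sirt_box(A, b, n_iter=500, lower=0.0, upper=1.0):
    # SIRT-style iteration x <- clip(x + C * A^T (R * (b - A x)), lower, upper)
    # with R = 1/row sums and C = 1/column sums of |A|.
    R = 1.0 / np.maximum(np.abs(A).sum(axis=1), 1e-12)
    C = 1.0 / np.maximum(np.abs(A).sum(axis=0), 1e-12)
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        x = x + C * (A.T @ (R * (b - A @ x)))
        x = np.clip(x, lower, upper)   # projection onto the box constraint
    return x

A = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
x_true = np.array([0.3, 0.8])
print(sirt_box(A, A @ x_true))   # should approach [0.3, 0.8]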
diff --git a/Wrappers/Python/wip/old_demos/multifile_nexus.py b/Wrappers/Python/wip/old_demos/multifile_nexus.py
deleted file mode 100755
index d1ad438..0000000
--- a/Wrappers/Python/wip/old_demos/multifile_nexus.py
+++ /dev/null
@@ -1,307 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Aug 15 16:00:53 2018
-
-@author: ofn77899
-"""
-
-import os
-from ccpi.io.reader import NexusReader
-
-from sys import getsizeof
-
-import matplotlib.pyplot as plt
-
-from ccpi.framework import DataProcessor, DataContainer
-from ccpi.processors import Normalizer
-from ccpi.processors import CenterOfRotationFinder
-import numpy
-
-
-class averager(object):
- def __init__(self):
- self.reset()
-
- def reset(self):
- self.N = 0
- self.avg = 0
- self.min = 0
- self.max = 0
- self.var = 0
- self.ske = 0
- self.kur = 0
-
- def add_reading(self, val):
-
- if (self.N == 0):
- self.avg = val
- self.min = val
- self.max = val
- elif (self.N == 1):
- #//set min/max
- self.max = val if val > self.max else self.max
- self.min = val if val < self.min else self.min
-
-
- thisavg = (self.avg + val)/2
- #// initial value is in avg
- self.var = (self.avg - thisavg)*(self.avg-thisavg) + (val - thisavg) * (val-thisavg)
- self.ske = self.var * (self.avg - thisavg)
- self.kur = self.var * self.var
- self.avg = thisavg
- else:
- self.max = val if val > self.max else self.max
- self.min = val if val < self.min else self.min
-
- M = self.N
-
- #// b-factor = (v_(N+1) - <v>_N) / (M+1)
- #float b = (val -avg) / (M+1);
- b = (val -self.avg) / (M+1)
-
- self.kur = self.kur + (M *(b*b*b*b) * (1+M*M*M))- (4* b * self.ske) + (6 * b *b * self.var * (M-1))
-
- self.ske = self.ske + (M * b*b*b *(M-1)*(M+1)) - (3 * b * self.var * (M-1))
-
- #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ;
- self.var = self.var * ((M-1)/M) + (b * b * (M+1))
-
- self.avg = self.avg * (M/(M+1)) + val / (M+1)
-
- self.N += 1
-
- def stats(self, vector):
- i = 0
- while i < vector.size:
- self.add_reading(vector[i])
- i+=1
-
-avg = averager()
-a = numpy.linspace(0,39,40)
-avg.stats(a)
-print ("average" , avg.avg, a.mean())
-print ("variance" , avg.var, a.var())
-b = a - a.mean()
-b *= b
-b = numpy.sqrt(sum(b)/(a.size-1))
-print ("std" , numpy.sqrt(avg.var), b)
-#%%
-
-class DataStatMoments(DataProcessor):
- '''Statistical moments along a given axis
-
- This processor reads in an AcquisitionData and accumulates, slice by slice
- along the chosen axis, the running average and variance (and optionally
- the skewness and kurtosis) of the data.
-
- Input: AcquisitionData
- Parameter: axis along which to accumulate the statistics
- Output: DataContainer holding the accumulated moments
- '''
-
- def __init__(self, axis, skewness=False, kurtosis=False, offset=0):
- kwargs = {
- 'axis' : axis,
- 'skewness' : skewness,
- 'kurtosis' : kurtosis,
- 'offset' : offset,
- }
- #DataProcessor.__init__(self, **kwargs)
- super(DataStatMoments, self).__init__(**kwargs)
-
-
- def check_input(self, dataset):
- #self.N = dataset.get_dimension_size(self.axis)
- return True
-
- @staticmethod
- def add_sample(dataset, N, axis, stats=None, offset=0):
- #dataset = self.get_input()
- if (N == 0):
- # get the axis number along to calculate the stats
-
-
- #axs = dataset.get_dimension_size(self.axis)
- # create a placeholder for the output
- if stats is None:
- ax = dataset.get_dimension_axis(axis)
- shape = [dataset.shape[i] for i in range(len(dataset.shape)) if i != ax]
- # output has 4 components (avg, variance, skewness, kurtosis) plus 2 working slots: thisavg and (val - thisavg)
- shape.insert(0, 4+2)
- stats = numpy.zeros(shape)
-
- stats[0] = dataset.subset(**{axis:N-offset}).array[:]
-
- #avg = val
- elif (N == 1):
- # val
- stats[5] = dataset.subset(**{axis:N-offset}).array
- stats[4] = stats[0] + stats[5]
- stats[4] /= 2 # thisavg
- stats[5] -= stats[4] # (val - thisavg)
-
- #float thisavg = (avg + val)/2;
-
- #// initial value is in avg
- #var = (avg - thisavg)*(avg-thisavg) + (val - thisavg) * (val-thisavg);
- stats[1] = stats[5] * stats[5] + stats[5] * stats[5]
- #skewness = var * (avg - thisavg);
- stats[2] = stats[1] * stats[5]
- #kurtosis = var * var;
- stats[3] = stats[1] * stats[1]
- #avg = thisavg;
- stats[0] = stats[4]
- else:
-
- #float M = (float)N;
- M = N
- #val
- stats[4] = dataset.subset(**{axis:N-offset}).array
- #// b-factor = (v_(N+1) - <v>_N) / (M+1)
- stats[5] = stats[4] - stats[0]
- stats[5] /= (M+1) # b factor
- #float b = (val -avg) / (M+1);
-
- #kurtosis = kurtosis + (M *(b*b*b*b) * (1+M*M*M))- (4* b * skewness) + (6 * b *b * var * (M-1));
- #if self.kurtosis:
- # stats[3] += (M * stats[5] * stats[5] * stats[5] * stats[5]) - \
- # (4 * stats[5] * stats[2]) + \
- # (6 * stats[5] * stats[5] * stats[1] * (M-1))
-
- #skewness = skewness + (M * b*b*b *(M-1)*(M+1)) - (3 * b * var * (M-1));
- #if self.skewness:
- # stats[2] = stats[2] + (M * stats[5]* stats[5] * stats[5] * (M-1)*(M-1) ) -\
- # 3 * stats[5] * stats[1] * (M-1)
- #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ;
- #var = var * ((M-1)/M) + (b * b * (M+1));
- stats[1] = ((M-1)/M) * stats[1] + (stats[5] * stats[5] * (M+1))
-
- #avg = avg * (M/(M+1)) + val / (M+1)
- stats[0] = stats[0] * (M/(M+1)) + stats[4] / (M+1)
-
- N += 1
- return stats , N
-
-
- def process(self):
-
- data = self.get_input()
-
- #stats, i = add_sample(0)
- N = data.get_dimension_size(self.axis)
- ax = data.get_dimension_axis(self.axis)
- stats = None
- i = 0
- while i < N:
- stats , i = DataStatMoments.add_sample(data, i, self.axis, stats, offset=self.offset)
- self.offset += N
- labels = ['StatisticalMoments']
-
- labels += [data.dimension_labels[i] \
- for i in range(len(data.dimension_labels)) if i != ax]
- y = DataContainer( stats[:4] , False,
- dimension_labels=labels)
- return y
-
-directory = r'E:\Documents\Dataset\CCPi\Nexus_test'
-data_path="entry1/instrument/pco1_hw_hdf_nochunking/data"
-
-reader = NexusReader(os.path.join( os.path.abspath(directory) , '74331.nxs'))
-
-print ("read flat")
-read_flat = NexusReader(os.path.join( os.path.abspath(directory) , '74240.nxs'))
-read_flat.data_path = data_path
-flatsslice = read_flat.get_acquisition_data_whole()
-avg = DataStatMoments('angle')
-
-avg.set_input(flatsslice)
-flats = avg.get_output()
-
-ave = averager()
-ave.stats(flatsslice.array[:,0,0])
-
-print ("avg" , ave.avg, flats.array[0][0][0])
-print ("var" , ave.var, flats.array[1][0][0])
-
-print ("read dark")
-read_dark = NexusReader(os.path.join( os.path.abspath(directory) , '74243.nxs'))
-read_dark.data_path = data_path
-
-## there are many darks, so we proceed in batches
-total_size = read_dark.get_projection_dimensions()[0]
-print ("total_size", total_size)
-
-batchsize = 40
-if batchsize < total_size:
- batchlimits = [batchsize * (i+1) for i in range(int(total_size/batchsize))] + [total_size-1]
-else:
- batchlimits = [total_size]
-#avg.N = 0
-avg.offset = 0
-N = 0
-for batch in range(len(batchlimits)):
- print ("running batch " , batch)
- bmax = batchlimits[batch]
- bmin = bmax-batchsize
-
- darksslice = read_dark.get_acquisition_data_batch(bmin,bmax)
- if batch == 0:
- #create stats
- ax = darksslice.get_dimension_axis('angle')
- shape = [darksslice.shape[i] for i in range(len(darksslice.shape)) if i != ax]
- # output has 4 components (avg, variance, skewness, kurtosis) plus 2 working slots: thisavg and (val - thisavg)
- shape.insert(0, 4+2)
- print ("create stats shape ", shape)
- stats = numpy.zeros(shape)
- print ("N" , N)
- #avg.set_input(darksslice)
- i = bmin
- while i < bmax:
- stats , i = DataStatMoments.add_sample(darksslice, i, 'angle', stats, bmin)
- print ("{0}-{1}-{2}".format(bmin, i , bmax ) )
-
-darks = stats
-#%%
-
-fig = plt.subplot(2,2,1)
-fig.imshow(flats.subset(StatisticalMoments=0).array)
-fig = plt.subplot(2,2,2)
-fig.imshow(numpy.sqrt(flats.subset(StatisticalMoments=1).array))
-fig = plt.subplot(2,2,3)
-fig.imshow(darks[0])
-fig = plt.subplot(2,2,4)
-fig.imshow(numpy.sqrt(darks[1]))
-plt.show()
-
-
-#%%
-norm = Normalizer(flat_field=flats.array[0,200,:], dark_field=darks[0,200,:])
-#norm.set_flat_field(flats.array[0,200,:])
-#norm.set_dark_field(darks.array[0,200,:])
-norm.set_input(reader.get_acquisition_data_slice(200))
-
-n = Normalizer.normalize_projection(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:], 1e-5)
-#dn_n= Normalizer.estimate_normalised_error(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:],
-# numpy.sqrt(flats.array[1,200,:]), numpy.sqrt(darks[1,200,:]))
-#%%
-
-
-#%%
-fig = plt.subplot(2,1,1)
-
-
-fig.imshow(norm.get_input().as_array())
-fig = plt.subplot(2,1,2)
-fig.imshow(n)
-
-#fig = plt.subplot(3,1,3)
-#fig.imshow(dn_n)
-
-
-plt.show()
-
-
-
-
-
-
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py
deleted file mode 100644
index 3fc9320..0000000
--- a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Feb 22 14:53:03 2019
-
-@author: evangelos
-"""
-
-from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-from ccpi.optimisation.algorithms import PDHG, PDHG_old
-
-from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
-from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \
- MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction
-
-from skimage.util import random_noise
-
-
-
-# ############################################################################
-# Create phantom for TV denoising
-
-N = 100
-data = np.zeros((N,N))
-data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
-data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
-
-ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
-ag = ig
-
-# Create noisy data. Add Gaussian noise
-n1 = random_noise(data, mode='gaussian', seed=10)
-noisy_data = ImageData(n1)
-
-
-#%%
-
-# Regularisation Parameter
-alpha = 2
-
-#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ")
-method = '0'
-if method == '0':
-
- # Create operators
- op1 = Gradient(ig)
- op2 = Identity(ig, ag)
-
- # Form Composite Operator
- operator = BlockOperator(op1, op2, shape=(2,1) )
-
- #### Create functions
-# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \
-# L2NormSq(0.5, b = noisy_data) )
-
- f1 = alpha * MixedL21Norm()
- f2 = 0.5 * L2NormSquared(b = noisy_data)
-
- f = BlockFunction(f1, f2 )
- g = ZeroFun()
-
-else:
-
- ###########################################################################
- # No Composite #
- ###########################################################################
- operator = Gradient(ig)
- f = alpha * FunctionOperatorComposition(operator, MixedL21Norm())
- g = 0.5 * L2NormSquared(b = noisy_data)
- ###########################################################################
-#%%
-
-diag_precon = True
-
-if diag_precon:
-
- def tau_sigma_precond(operator):
-
- tau = 1/operator.sum_abs_row()
- sigma = 1/ operator.sum_abs_col()
-
- return tau, sigma
-
- tau, sigma = tau_sigma_precond(operator)
-
-else:
- # Compute operator Norm
- normK = operator.norm()
- print ("normK", normK)
- # Primal & dual stepsizes
- sigma = 1/normK
- tau = 1/normK
-# tau = 1/(sigma*normK**2)
-
-#%%
-
-opt = {'niter':2000}
-
-res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
-
-plt.figure(figsize=(5,5))
-plt.imshow(res.as_array())
-plt.colorbar()
-plt.show()
-
-#aaa = res[0].as_array()
-#
-#plt.imshow(aaa)
-#plt.colorbar()
-#plt.show()
-#c2 = aaa
-#del aaa
-#%%
-
-#c2 = aaa
-##%%
-#%%
-#z = c1 - c2
-#plt.imshow(np.abs(z[0:95,0:95]))
-#plt.colorbar()
-
-#%%
-#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
-#pdhg.max_iteration = 2000
-#pdhg.update_objective_interval = 10
-#
-#pdhg.run(2000)
-#
-#
-#
-#sol = pdhg.get_output().as_array()
-##sol = result.as_array()
-##
-#fig = plt.figure()
-#plt.subplot(1,2,1)
-#plt.imshow(noisy_data.as_array())
-##plt.colorbar()
-#plt.subplot(1,2,2)
-#plt.imshow(sol)
-##plt.colorbar()
-#plt.show()
-##
-#
-###
-#plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth')
-#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon')
-#plt.legend()
-#plt.show()
-#
-
-#%%
-#
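For reference, the diagonal preconditioning in the deleted pdhg_TV_denoising_precond.py script follows the Pock-Chambolle rule: per-element dual step sizes from the absolute row sums of the operator and per-element primal step sizes from the absolute column sums (how rows and columns map onto sum_abs_row/sum_abs_col depends on the framework's convention). A small numpy sketch on an explicit matrix, not the BlockOperator API:

import numpy as np

def diag_precond_steps(K):
    # elementwise PDHG step sizes: sigma_i = 1/sum_j |K_ij|, tau_j = 1/sum_i |K_ij|
    absK = np.abs(K)
    sigma = 1.0 / np.maximum(absK.sum(axis=1), 1e-12)   # one per row / dual variable
    tau = 1.0 / np.maximum(absK.sum(axis=0), 1e-12)     # one per column / primal variable
    return tau, sigma

K = np.array([[1.0, -1.0, 0.0],
              [0.0,  1.0, -1.0]])     # a tiny 1D finite-difference operator
tau, sigma = diag_precond_steps(K)
print(tau)     # [1.  0.5 1. ]
print(sigma)   # [0.5 0.5]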