From a07d236c3fe5a24d3b1be9376312f1088250507e Mon Sep 17 00:00:00 2001
From: Anton Vasilev <31480657+nocliper@users.noreply.github.com>
Date: Thu, 2 Nov 2023 16:13:26 +0300
Subject: [PATCH 1/3] for review
---
modules/L1L2.py | 67 ----
modules/L1_FISTA.py | 21 --
modules/L2.py | 67 ----
modules/contin.py | 131 --------
modules/demo.py | 63 ----
modules/filebrowser.py | 53 ----
modules/fista.py | 685 -----------------------------------------
modules/hp.py | 205 ------------
modules/interface.py | 142 ---------
modules/laplace.py | 59 ----
modules/plot_data.py | 89 ------
modules/reSpect.py | 315 -------------------
modules/read_file.py | 81 -----
modules/regopt.py | 189 ------------
14 files changed, 2167 deletions(-)
delete mode 100644 modules/L1L2.py
delete mode 100644 modules/L1_FISTA.py
delete mode 100644 modules/L2.py
delete mode 100644 modules/contin.py
delete mode 100644 modules/demo.py
delete mode 100644 modules/filebrowser.py
delete mode 100644 modules/fista.py
delete mode 100644 modules/hp.py
delete mode 100644 modules/interface.py
delete mode 100644 modules/laplace.py
delete mode 100644 modules/plot_data.py
delete mode 100644 modules/reSpect.py
delete mode 100644 modules/read_file.py
delete mode 100644 modules/regopt.py
diff --git a/modules/L1L2.py b/modules/L1L2.py
deleted file mode 100644
index 25167f0..0000000
--- a/modules/L1L2.py
+++ /dev/null
@@ -1,67 +0,0 @@
-""" Module to implement L1 and/or L2 regularization with
-SciPy linear_models module
-"""
-
-def L1L2(t, F, bound, Nz, alpha1, alpha2, iterations = 10000):
- """
- Returns solution using mixed L1 and/or L2 regularization
- with simple gradient descent
-
- F(t) = ∫f(s)*exp(-s*t)ds
-
- or
-
- min = ||C*f - F||2 + alpha1*||I*f||1 + alpha2*||I*f||2
-
- Parameters:
- ------------
- t : array
- Time domain data from experiment
- F : array,
- Transient data from experiment F(t)
- bound : list
- [lowerbound, upperbound] of s domain points
- Nz : int
- Number of points z to compute, must be smaller than len(F)
- alpha1, alpha 2 : floats
- Regularization parameters for L1 and L2 regularizers
- iterations : int
- Maximum number of iterations. Optional
-
-
- Returns:
- ------------
- s : array
- Emission rates domain points (evenly spaced on log scale)
- f : array
- Inverse Laplace transform f(s)
- F_restored : array
- Reconstructed transient from C@f + intercept
- """
-
- import numpy as np
- from scipy.sparse import diags
- from sklearn.linear_model import ElasticNet
-
- # set up grid points (# = Nz)
- h = np.log(bound[1]/bound[0])/(Nz - 1) # equally spaced on logscale
- s = bound[0]*np.exp(np.arange(Nz)*h) # z (Nz by 1)
-
- # construct C matrix from [1]
- s_mesh, t_mesh = np.meshgrid(s, t)
- C = np.exp(-t_mesh*s_mesh)
- C[:, 0] /= 2.
- C[:, -1] /= 2.
- C *= h
-
- alpha = alpha1 + alpha2
- l1_ratio = alpha1/alpha
-
- model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, tol = 1e-12,
- fit_intercept = True, max_iter = iterations)
- model.fit(C, F)
-
- f = model.coef_
-
- F_restored = C@f + model.intercept_
- return s, f, F_restored
\ No newline at end of file
diff --git a/modules/L1_FISTA.py b/modules/L1_FISTA.py
deleted file mode 100644
index 5c47079..0000000
--- a/modules/L1_FISTA.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from fista import Fista
-import numpy as np
-
-def l1_fista(t, F, bound, Nz, alpha, iterations = 50000, penalty = 'l11'):
-
- "Function to implement FISTA algorithm"
-
- h = np.log(bound[1]/bound[0])/(Nz - 1) # equally spaced on logscale
- z = bound[0]*np.exp(np.arange(Nz)*h) # z (Nz by 1)
-
- z_mesh, t_mesh = np.meshgrid(z, t)
- C = np.exp(-t_mesh*z_mesh) # specify integral kernel
- C[:, 0] /= 2.
- C[:, -1] /= 2.
- C *= h
-
- fista = Fista(loss='least-square', penalty = penalty, lambda_=alpha, n_iter = iterations)
- fista.fit(C, F)
-
-
- return z, fista.coefs_, fista.predict(C)
diff --git a/modules/L2.py b/modules/L2.py
deleted file mode 100644
index fc75be7..0000000
--- a/modules/L2.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Module to implement routines of numerical inverse
-Laplace tranform using L2 regularization algorithm
-"""
-
-import numpy as np
-from scipy.sparse import diags
-
-def L2(t, F, bound, Nz, alpha):
- """
- Returns solution for problem imposing L2 regularization
-
- F(t) = ∫f(s)*exp(-s*t)ds
-
- or
-
- min = ||C*f - F||2 + alpha*||I*f||2
-
-
- Parameters:
- ------------
- t : array
- Time domain data from experiment
- F : array,
- Transient data from experiment F(t)
- bound : list
- [lowerbound, upperbound] of s domain points
- Nz : int
- Number of points z to compute, must be smaller than len(F)
- alpha : float
- Regularization parameters for L2 regularizers
- iterations : int
- Maximum number of iterations. Optional
-
-
- Returns:
- ------------
- s : array
- Emission rates domain points (evenly spaced on log scale)
- f : array
- Inverse Laplace transform f(s)
- F_restored : array
- Reconstructed transient from C@f + intercept
- """
-
- # set up grid points (# = Nz)
- h = np.log(bound[1]/bound[0])/(Nz - 1) # equally spaced on logscale
- s = bound[0]*np.exp(np.arange(Nz)*h) # z (Nz by 1)
-
- # construct C matrix from [1]
- s_mesh, t_mesh = np.meshgrid(s, t)
- C = np.exp(-t_mesh*s_mesh)
- C[:, 0] /= 2.
- C[:, -1] /= 2.
- C *= h
-
- l2 = alpha
-
- data = [-2*np.ones(Nz), 1*np.ones(Nz), 1*np.ones(Nz)]
- positions = [-1, -2, 0]
- I = diags(data, positions, (Nz+2, Nz)).toarray()
- #I = np.identity(Nz)
-
- f = np.linalg.solve(l2*np.dot(I.T,I) + np.dot(C.T,C), np.dot(C.T,F))
-
- F_restored = C@f
-
- return s, f, F_restored#, res_norm, sol_norm
diff --git a/modules/contin.py b/modules/contin.py
deleted file mode 100644
index 24ee1ee..0000000
--- a/modules/contin.py
+++ /dev/null
@@ -1,131 +0,0 @@
-"""Module to implement routines of numerical inverse
-Laplace tranform using Contin algorithm [1]
-
-[1] Provencher, S. (1982)
-"""
-
-from __future__ import division
-import numpy as np
-
-def Contin(t, F, bound, Nz, alpha):
- """
- Function returns processed data f(z) from the equation
-
- F(t) = ∫f(z)*exp(-z*t)
-
- Parameters:
- --------------
- t : array
- Time domain data from experiment
- F : array,
- Transient data from experiment F(t)
- bound : list
- [lowerbound, upperbound] of z domain points
- Nz : int
- Number of points z to compute, must be smaller than len(F)
- alpha : float
- Regularization parameter
-
- Returns:
- --------------
- z : array
- Emission rates domain points (evenly spaced on log scale)
- f : array
- Inverse Laplace transform f(z)
- F_restored : array
- Reconstructed transient from C@f(z)
- """
-
-
- # set up grid points (# = Nz)
- h = np.log(bound[1]/bound[0])/(Nz - 1) # equally spaced on logscale
- z = bound[0]*np.exp(np.arange(Nz)*h) # z (Nz by 1)
-
- # construct C matrix from [1]
- z_mesh, t_mesh = np.meshgrid(z, t)
- C = np.exp(-t_mesh*z_mesh)
- C[:, 0] /= 2.
- C[:, -1] /= 2.
- C *= h
-
- # construct regularization matrix R to impose gaussian-like peaks in f(z)
- # R - tridiagonal matrix (1,-2,1)
- Nreg = Nz + 2
- R = np.zeros([Nreg, Nz])
- R[0, 0] = 1.
- R[-1, -1] = 1.
- R[1:-1, :] = -2*np.diag(np.ones(Nz)) + np.diag(np.ones(Nz-1), 1) \
- + np.diag(np.ones(Nz-1), -1)
-
- #R = U*H*Z^T
- U, H, Z = np.linalg.svd(R, full_matrices=False) # H diagonal
- Z = Z.T
- H = np.diag(H)
-
- #C*Z*inv(H) = Q*S*W^T
- Hinv = np.diag(1.0/np.diag(H))
- Q, S, W = np.linalg.svd(C.dot(Z).dot(Hinv), full_matrices=False) # S diag
- W = W.T
- S = np.diag(S)
-
- # construct GammaTilde & Stilde
- # ||GammaTilde - Stilde*f5||^2 = ||Xi||^2
- Gamma = np.dot(Q.T, F)
- Sdiag = np.diag(S)
- Salpha = np.sqrt(Sdiag**2 + alpha**2)
- GammaTilde = Gamma*Sdiag/Salpha
- Stilde = np.diag(Salpha)
-
- # construct LDP matrices G = Z*inv(H)*W*inv(Stilde), B = -G*GammaTilde
- # LDP: ||Xi||^2 = min, with constraint G*Xi >= B
- Stilde_inv = np.diag(1.0/np.diag(Stilde))
- G = Z @ Hinv @ W @ Stilde_inv
- B = -G @ GammaTilde
-
- # call LDP solver
- Xi = ldp(G, B)
-
- # final solution
- zf = np.dot(G, Xi + GammaTilde)
- f = zf/z
-
- F_restored = C@zf
-
- return z, f, F_restored
-
-
-def ldp(G, h):
- """
- Helper for Contin() for solving NNLS [1]
-
- [1] - Lawson and Hanson’s (1974)
- Parameters:
- -------------
- G : matrix
- Z*inv(H)*W*inv(Stilde)
- h : array
- -G*GammaTilde
-
- Returns:
- -------------
- x : array
- Solution of argmin_x || Ax - b ||_2
- """
-
- from scipy.optimize import nnls
-
- m, n = G.shape
- A = np.concatenate((G.T, h.reshape(1, m)))
- b = np.zeros(n+1)
- b[n] = 1.
-
- # Solving for argmin_x || Ax - b ||_2
- x, resnorm = nnls(A, b)
-
- r = A@x - b
-
- if np.linalg.norm(r) == 0:
- print('\n No solution found, try different input!')
- else:
- x = -r[0:-1]/r[-1]
- return x
diff --git a/modules/demo.py b/modules/demo.py
deleted file mode 100644
index f11bfd9..0000000
--- a/modules/demo.py
+++ /dev/null
@@ -1,63 +0,0 @@
-def demo(Index, Nz, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, dt, Methods, Plot, Residuals, LCurve, Arrhenius, Heatplot):
- """Gets data from widgets initialized with interface(),
- calls laplace() to process data and calls plot_data(), hp()
- to plot results
-
- Parameters:
- -------------
- Index : int
- Index of transient in dataset
- Nz : int
- Value which is length of calculated vector
- Reg_L1, Reg_L2 : floats
- Reg. parameters for FISTA(L1) and L2 regularization
- Reg_C, Reg_S : floats
- Reg. parameters for CONTIN and reSpect algorithms
- Bounds : list
- [lowerbound, upperbound ] bounds of emission rates domain points
- dt : int
- Time step of transient data points in ms
- Methods : list
- Methods to process data
- Plot : boolean
- Calls plot_data() if True
- Residuals : boolean
- Calls regopt() and plots L-curve to control
- regularization if True
- LCurve : boolean,
- Automatically picks optimal reg. parameter from
- L-curve if True
- Heatplot : boolean
- Plots heatplot for all dataset and saves data
- in .LDLTS if True
- """
-
- import numpy as np
- import matplotlib.pyplot as plt
-
- from read_file import read_file
- from laplace import laplace
- from plot_data import plot_data
- from hp import hp
- from read_file import read_file
- from regopt import regopt
- from interface import interface
-
- Bounds = 10.0**np.asarray(Bounds)
-
- t, C, T = read_file(interface.path, dt, proc = True)# time, transients, temperatures
-
- data = laplace(t, C[Index], Nz, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Methods)
- #print(data)
-
- if Plot:
- ay, aph = plot_data(t, C[Index], data, T, Index)
-
- if Heatplot:
- hp(t, C, T, aph, Methods, Index, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz, LCurve, Arrhenius)
-
- if Residuals:
- regopt(t, C[Index], ay, Methods, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz)
-
- plt.show()
-
\ No newline at end of file
diff --git a/modules/filebrowser.py b/modules/filebrowser.py
deleted file mode 100644
index 860804a..0000000
--- a/modules/filebrowser.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import os
-import ipywidgets as widgets
-
-l = widgets.Layout(width='50%')
-
-class FileBrowser(object):
- def __init__(self):
- self.path = os.getcwd()
- self._update_files()
-
- def _update_files(self):
- self.files = list()
- self.folders = list()
- if(os.path.isdir(self.path)):
- for f in os.listdir(self.path):
- ff = os.path.join(self.path, f)
- if os.path.isdir(ff):
- self.folders.append(f)
- else:
- self.files.append(f)
-
- def widget(self):
- box = widgets.VBox()
- self._update(box)
- return box
-
- def _update(self, box):
-
- def on_click(b):
- if b.description == '..':
- self.path = os.path.split(self.path)[0]
- else:
- self.path = os.path.join(self.path, b.description)
- self._update_files()
- self._update(box)
-
- buttons = []
- if self.files or self.folders:
- button = widgets.Button(layout = l, description='..', button_style='primary')
- button.on_click(on_click)
- buttons.append(button)
- for f in self.folders:
- if f[0] != '.' and f[:2] != '__' and f != 'processed' and f != 'modules':
- button = widgets.Button(layout = l, description=f, button_style='info')
- button.on_click(on_click)
- buttons.append(button)
- for f in self.files:
- if f[0] != '.' and f[-5:] == '.DLTS' or f[-5:] == '.PERS':
- button = widgets.Button(layout = l, description=f, button_style='success')
- button.on_click(on_click)
- buttons.append(button)
-
-        box.children = tuple([widgets.HTML("<h2>%s</h2>" % (self.path,))] + buttons)
diff --git a/modules/fista.py b/modules/fista.py
deleted file mode 100644
index 6d616c3..0000000
--- a/modules/fista.py
+++ /dev/null
@@ -1,685 +0,0 @@
-"""
-Module implementing the FISTA algorithm
-"""
-from __future__ import division, print_function
-
-__author__ = 'Jean KOSSAIFI'
-
-import numpy as np
-import sys
-from scipy.linalg import norm
-from math import sqrt
-from sklearn.base import BaseEstimator
-#from sklearn.datasets.base import Bunch
-from sklearn.metrics import roc_auc_score
-from hashlib import sha1
-
-
-def mixed_norm(coefs, p, q=None, n_samples=None, n_kernels=None):
- """ Computes the (p, q) mixed norm of the vector coefs
-
- Parameters
- ----------
- coefs : ndarray
- a vector indexed by (l, m)
- with l in range(0, n_kernels)
- and m in range(0, n_samples)
-
- p : int or np.inf
-
- q : int or np.int
-
- n_samples : int, optional
- number of elements in each kernel
- default is None
-
- n_kernels : int, optional
- number of kernels
- default is None
-
- Returns
- -------
- float
- """
- if q is None or p == q:
- return norm(coefs, p)
- else:
- return norm([norm(i, p) for i in coefs.reshape(
- n_kernels, n_samples)], q)
-
-
-def dual_mixed_norm(coefs, n_samples, n_kernels, norm_):
- """ Returns a function corresponding to the dual mixt norm
-
- Parameters
- ----------
- coefs : ndarray
- a vector indexed by (l, m)
- with l in range(0, n_kernels)
- and m in range(0, n_samples)
-
- n_samples : int, optional
- number of elements in each kernel
- default is None
-
- n_kernels : int, optional
- number of kernels
- default is None
-
- norm_ : {'l11', 'l12', 'l21', 'l22'}
- the dual mixed norm we want to compute
-
- Returns
- -------
- float
- """
- if norm_ == 'l11':
- res = norm(coefs, np.inf)
- elif norm_ == 'l12':
- res = mixed_norm(coefs, np.inf, 2, n_samples, n_kernels)
- elif norm_ == 'l21':
- res = mixed_norm(coefs, 2, np.inf, n_samples, n_kernels)
- else:
- res = norm(coefs, 2)
- return res
-
-
-def by_kernel_norm(coefs, p, q, n_samples, n_kernels):
- """ Computes the (p, q) norm of coefs for each kernel
-
- Parameters
- ----------
- coefs : ndarray
- a vector indexed by (l, m)
- with l in range(0, n_kernels)
- and m in range(0, n_samples)
-
- p : int or np.inf
-
- q : int or np.inf
-
- n_samples : int, optional
- number of elements in each kernel
- default is None
-
- n_kernels : int, optional
- number of kernels
- default is None
-
- Returns
- -------
- A list of the norms of the sub vectors associated to each kernel
- """
- return [mixed_norm(i, p, q, n_samples, 1)
- for i in coefs.reshape(n_kernels, n_samples)]
-
-
-def prox_l11(u, lambda_):
- """ Proximity operator for l(1, 1, 2) norm
-
-
-
- :math:`\\hat{\\alpha}_{l,m} = sign(u_{l,m})\\left||u_{l,m}| - \\lambda \\right|_+`
-
- Parameters
- ----------
- u : ndarray
- The vector (of the n-dimensional space) on witch we want
- to compute the proximal operator
-
- lambda_ : float
- regularisation parameter
-
- Returns
- -------
- ndarray : the vector corresponding to the application of the
- proximity operator to u
-
- """
- return np.sign(u) * np.maximum(np.abs(u) - lambda_, 0.)
-
-def prox_l22(u, lambda_):
- """ proximity operator l(2, 2, 2) norm
-
- Parameters
- ----------
-
- u : ndarray
- The vector (of the n-dimensional space) on witch we want to compute the proximal operator
-
- lambda_ : float
- regularisation parameter
-
- Returns
- -------
-
- ndarray : the vector corresponding to the application of the proximity operator to u
-
- Notes
- -----
-
- :math:`\\hat{\\alpha}_{l,m} = \\frac{1}{1 + \\lambda} \\, u_{l,m}`
-
- """
- return 1./(1.+lambda_)*u
-
-def prox_l21_1(u, l, n_samples, n_kernels):
- """ Proximity operator l(2, 1, 1) norm
-
- Parameters
- ----------
- u : ndarray
- The vector (of the n-dimensional space) on witch we want to compute the proximal operator
-
- lambda_ : float
- regularisation parameter
-
- n_samples : int, optional
- number of elements in each kernel
- default is None
-
- n_kernels : int, optional
- number of kernels
- default is None
-
- Returns
- -------
- ndarray : the vector corresponding to the application of the proximity operator to u
-
-
- Notes
- -----
-
- .. math::
-
- \hat{\alpha}_{l,m} = u_{l,m} \left| 1 - \frac{\lambda}{\|u_{l \bullet}\|_{2}} \right|_+\
-
- where l is in range(0, n_samples) and m is in range(0, n_kernels)
- so :math:`u_{l\\bullet}` = [u(l, m) for m in n_kernels]
-
- """
- return (u.reshape(n_kernels, n_samples) *\
- [max(1. - l/norm(u[np.arange(n_kernels)*n_samples+i], 2), 0.)
- for i in range(n_samples)]).reshape(-1)
-
-
-def prox_l21(u, l, n_samples, n_kernels):
- """ proximity operator l(2, 1, 2) norm
-
- Parameters
- ----------
- u : ndarray
- The vector (of the n-dimensional space) on witch we want to compute the proximal operator
-
- lambda_ : float
- regularisation parameter
-
- n_samples : int, optional
- number of elements in each kernel
- default is None
-
- n_kernels : int, optional
- number of kernels
- default is None
-
-
- Returns
- -------
- ndarray : the vector corresponding to the application of the proximity operator to u
-
- Notes
- -----
-
- :math:`\\hat{\\alpha}_{l,m} = u_{l,m} \\left| 1 - \\frac{ \\lambda}{ \\|u_{l \\bullet }\\|_{2}} \\right|_+`
-
- where l is in range(0, n_kernels) and m is in range(0, n_samples)
- so :math:`u_{l \\bullet }` = [u(l, m) for l in n_samples]
-
- """
- for i in u.reshape(n_kernels, n_samples):
- n = norm(i, 2)
- if n==0 or n==np.Inf:
- i[:] = 0
- else:
- i[:] *= max(1. - l/n, 0.)
- # !! If you do just i *= , u isn't modified
- # The slice is needed here so that the array can be modified
- return u
-
-
-def prox_l12(u, l, n_samples, n_kernels):
- """ proximity operator for l(1, 2, 2) norm
-
- Parameters
- ----------
- u : ndarray
- The vector (of the n-dimensional space) on witch we want to compute the proximal operator
-
- lambda_ : float
- regularization parameter
-
- n_samples : int, optional
- number of elements in each kernel
- default is None
-
- n_kernels : int, optional
- number of kernels
- default is None
-
- Returns
- -------
- ndarray : the vector corresponding to the application of the proximity operator to u
-
-
- Notes
- -----
-
- :math:`\\hat{\\alpha}_{l,m} = sign(u_{l,m})\\left||u_{l,m}| - \\frac{\\lambda \\sum\\limits_{m_l=1}^{M_l} u2_{l,m_l}}{(1+\\lambda M_l) \\|u_{l \\bullet }\\|_{2}} \\right|_+`
-
- where :math:`u2_{l,m_l}` denotes the :math:`|u_{l,m_l}|`
- ordered by descending order for fixed :math:`l`, and the
- quantity :math:`M_l` is the number computed in compute_M
-
- """
- for i in u.reshape(n_kernels, n_samples):
- Ml, sum_Ml = compute_M(i, l, n_samples)
- # i[:] so that u is really modified
- n = norm(i, 2)
- if n == 0 or n == np.Inf:
- i[:] = 0
- else:
- i[:] = np.sign(i)*np.maximum(
- np.abs(i)-(l*sum_Ml)/((1.+l*Ml)*n), 0.)
- return u
-
-def compute_M(u, lambda_, n_samples):
- """
- Parameters
- ----------
- u : ndarray
- ndarray of size (n_samples * n_samples) representing a subvector of K,
- ie the samples for a single kernel
-
- lambda_ : int
-
- n_samples : int
- number of elements in each kernel
- ie number of elements of u
-
- Notes
- -----
-
- :math:`M_l` is the number such that
-
- :math:`u2_{l,M_l+1} \\leq \\lambda \\sum_{m_l=1}^{M_l+1} \\left( u2_{l,m_l} - u2_{l,M_l+1}\\right)`
-
- and
-
-
- :math:`u2_{l,M_l} > \\lambda\\sum_{m_l=1}^{M_l} \\left( u2_{l,m_l} - u2_{k,M_l}\\right)`
-
- Detailed explication
-
- let u denotes |u(l)|, the vector associated with the kernel l, ordered by descending order
- Ml is the integer such that
- u(Ml) <= l * sum(k=1..Ml + 1) (u(k) - u(Ml + 1)) (S1)
- and
- u(Ml) > l * sum(k=1..Ml) (u(k) - u(Ml) (S2)
- Note that in that definition, Ml is in [1..Ml]
- In python, while Ml is in [1..(Ml-1)], indices will be in [0..(Ml-1)], so we must take care of indices.
- That's why, we consider Ml is in [0..(Ml-1)] and, at the end, we add 1 to the result
-
- Detailed example
-
- if u(l) = [0 1 2 3] corresponds to the vector associated with a kernel
- then u = |u(l)| ordered by descending order ie u = [3 2 1 0]
-
- Then u = [3 2 1 0]
- let l = 1
- Ml is in {0, 1, 2} (not 3 because we also consider Ml+1)
- # Note : in fact Ml is in {1, 2, 3} but it is more convenient
- # to consider it is in {0, 1, 2} as indexing in python starts at 0
- # We juste have to add 1 to the final result
-
- if Ml = 0 then S1 = 1 and S2 = 0
- if Ml = 1 then S1 = 3 and S2 = 1
- if Ml = 2 then S1 = 6 and S2 = 3
-
- if Ml = 0 then u(Ml+1)=u(1)=2 > l*... =1 (S1 is not verified)
- and u(Ml)=u(0)=3 > l*... =0 (S2 is verified)
-
- if Ml = 1 then u(Ml+1)=u(2)=1 <= l*... =3 (S1 is verified)
- and u(Ml)=u(1)=2 > l*... =1 (S2 is verified)
-
- if Ml = 2 then u(Ml+1)=u(3)=0 <= l*... =6 (S1 is verified)
- but u(Ml)=u(2)=1 <= l*... =3 (S1 is not verified)
-
- Conclusion : Ml = 1 + 1 !!
- Ml = 2 because in python, indexing starts at 0, so Ml +1
-
- """
- u = np.sort(np.abs(u))[::-1]
- S1 = u[1:] - lambda_*(np.cumsum(u)[:-1] - (np.arange(n_samples-1)+1)*u[1:])
- S2 = u[:-1] - lambda_*(np.cumsum(u)[:-1] - (np.arange(n_samples-1)+1)*u[:-1])
- Ml = np.argmax((S1<=0.) & (S2>0.)) + 1
-
- return Ml, np.sum(u[:Ml]) # u[:Ml] = u[0, 1, ..., Ml-1] !!
-
-
-def hinge_step(y, K, Z):
- """
- Returns the point in witch we apply gradient descent
-
- parameters
- ----------
- y : np-array
- the labels vector
-
- K : 2D np-array
- the concatenation of all the kernels, of shape
- n_samples, n_kernels*n_samples
-
- Z : a linear combination of the last two coefficient vectors
-
- returns
- -------
- res : np-array of shape n_samples*,_kernels
- a point of the space where we will apply gradient descent
- """
- return np.dot(K.transpose(), np.maximum(1 - np.dot(K, Z), 0))
-
-def least_square_step(y, K, Z):
- """
- Returns the point in witch we apply gradient descent
-
- parameters
- ----------
- y : np-array
- the labels vector
-
- K : 2D np-array
- the concatenation of all the kernels, of shape
- n_samples, n_kernels*n_samples
-
- Z : a linear combination of the last two coefficient vectors
-
- returns
- -------
- res : np-array of shape n_samples*,_kernels
- a point of the space where we will apply gradient descent
- """
- return np.dot(K.transpose(), y - np.dot(K,Z))
-
-
-def _load_Lipschitz_constant(K):
- """ Loads the Lipschitz constant and computes it if not already saved
-
- Parameters
- ----------
- K : 2D-ndarray
- The matrix of witch we want to compute the Lipschitz constant
-
- Returns
- -------
- float
-
- Notes
- -----
- Lipshitz constant is just a number < 2/norm(np.dot(K, K.T), 2)
-
- The constant is stored in a npy hidden file, in the current directory.
- The filename is the sha1 hash of the ndarray
-
- """
- try:
- mu = np.load('./.%s.npy' % sha1(K).hexdigest())
- except:
- mu = 1/norm(np.dot(K, K.transpose()), 2)
- np.save('./.%s.npy' % sha1(K).hexdigest(), mu)
- return mu
-
-
-class Fista(BaseEstimator):
- """
-
- Fast iterative shrinkage/thresholding Algorithm
-
- Parameters
- ----------
-
- lambda_ : int, optional
- regularization parameter
- default is 0.5
-
- loss : {'squared-hinge', 'least-square'}, optional
- the loss function to use
- defautl is 'squared-hinge'
-
- penalty : {'l11', 'l22', 'l12', 'l21'}, optional
- norm to use as penalty
- default is l11
-
- n_iter : int, optional
- number of iterations
- default is 1000
-
- recompute_Lipschitz_constant : bool, optional
- if True, the Lipschitz constant is recomputed every time
- if False, it is stored based on it's sha1 hash
- default is False
-
- """
-
- def __init__(self, lambda_=0.5, loss='squared-hinge', penalty='l11', n_iter=1000, recompute_Lipschitz_constant=False):
- self.n_iter = n_iter
- self.lambda_ = lambda_
- self.loss = loss
- self.penalty = penalty
- self.p = int(penalty[1])
- self.q = int(penalty[2])
- self.recompute_Lipschitz_constant = recompute_Lipschitz_constant
-
- def fit(self, K, y, Lipschitz_constant=None, verbose=0, **params):
- """ Fits the estimator
-
- We want to solve a problem of the form y = KB + b
- where K is a (n_samples, n_kernels*n_samples) matrix.
-
- Parameters
- ---------
- K : ndarray
- numpy array of shape (n, p)
- K is the concatenation of the p/n kernels
- where each kernel is of size (n, n)
-
- y : ndarray
- an array of the labels to predict for each kernel
- y is of size p
- where K.shape : (n, p)
-
- Lipschitz_constant : float, optional
- allow the user to pre-compute the Lipschitz constant
- (its computation can be very slow, so that parameter is very
- useful if you were to use several times the algorithm on the same data)
-
- verbose : {0, 1}, optional
- verbosity of the method : 1 will display information while 0 will display nothing
- default = 0
-
- Returns
- -------
- self
- """
- next_step = hinge_step
- if self.loss=='squared-hinge':
- K = y[:, np.newaxis] * K
- # Equivalent to K = np.dot(np.diag(y), X) but faster
- elif self.loss=='least-square':
- next_step = least_square_step
-
- (n_samples, n_features) = K.shape
- n_kernels = int(n_features/n_samples) # We assume each kernel is a square matrix
- self.n_samples, self.n_kernels = n_samples, n_kernels
-
- if Lipschitz_constant==None:
- Lipschitz_constant = _load_Lipschitz_constant(K)
-
- tol = 10**(-6)
- coefs_current = np.zeros(n_features, dtype=np.float) # coefficients to compute
- coefs_next = np.zeros(n_features, dtype=np.float)
- Z = np.copy(coefs_next) # a linear combination of the coefficients of the 2 last iterations
- tau_1 = 1
-
- if self.penalty=='l11':
- prox = lambda u:prox_l11(u, self.lambda_*Lipschitz_constant)
- elif self.penalty=='l22':
- prox = lambda u:prox_l22(u, self.lambda_*Lipschitz_constant)
- elif self.penalty=='l21':
- prox = lambda u:prox_l21(u, self.lambda_*Lipschitz_constant, n_samples, n_kernels)
- elif self.penalty=='l12':
- prox = lambda u:prox_l12(u, self.lambda_*Lipschitz_constant, n_samples, n_kernels)
-
- if verbose==1:
- self.iteration_dual_gap = list()
-
- for i in range(self.n_iter):
- coefs_current = coefs_next # B_(k-1) = B_(k)
- coefs_next = prox(Z + Lipschitz_constant*next_step(y, K, Z))
-
- tau_0 = tau_1 #tau_(k+1) = tau_k
- tau_1 = (1 + sqrt(1 + 4*tau_0**2))/2
-
- Z = coefs_next + (tau_0 - 1)/tau_1*(coefs_next - coefs_current)
-
- # Dual problem
- objective_var = 1 - np.dot(K, coefs_next)
- objective_var = np.maximum(objective_var, 0) # Shrink
- # Primal objective function
- penalisation = self.lambda_/self.q*(mixed_norm(coefs_next,
- self.p, self.q, n_samples, n_kernels)**self.q)
- loss = 0.5*np.sum(objective_var**2)
- objective_function = penalisation + loss
-
- # Dual objective function
- dual_var = objective_var
- if self.lambda_ != 0:
- dual_penalisation = dual_mixed_norm(np.dot(K.T,dual_var)/self.lambda_,
- n_samples, n_kernels, self.penalty)
- if self.q==1:
- # Fenchel conjugate of a mixed norm
- if dual_penalisation > 1:
- dual_var = dual_var / dual_penalisation
- # If we did not normalise, dual_penalisation
- # would be +infinity ...
- dual_penalisation = 0
- else:
- # Fenchel conjugate of a squared mixed norm
- dual_penalisation = self.lambda_/2*(dual_penalisation**2)
- else:
- dual_penalisation = 0
- dual_loss = -0.5*np.sum(dual_var**2) + np.sum(dual_var)
- # trace(np.dot(duat_var[:, np.newaxis], y)) au lieu du sum(dual_var) ?
- dual_objective_function = dual_loss - self.lambda_/self.q*dual_penalisation
- gap = abs(objective_function - dual_objective_function)
-
- if verbose:
- sys.stderr.write("Iteration : %d\r" % i )
- # print "iteration %d" % i
- self.iteration_dual_gap.append(gap)
- if i%1000 == 0:
- print("primal objective : %f, dual objective : %f, dual_gap : %f" % (objective_function, dual_objective_function, gap))
-
- if gap<=tol and i>10:
- print("convergence at iteration : %d" %i)
- break
-
- if verbose:
- print("dual gap : %f" % gap)
- print("objective_function : %f" % objective_function)
- print("dual_objective_function : %f" % dual_objective_function)
- print("dual_penalisation : %f" % dual_penalisation)
- print("dual_loss : %f" % dual_loss)
- self.coefs_ = coefs_next
- self.gap = gap
- self.objective_function = objective_function
- self.dual_objective_function = dual_objective_function
-
- return self
-
- def predict(self, K):
- """ Returns the prediction associated to the Kernels represented by K
-
- Parameters
- ----------
- K : ndarray
- ndarray of size (n_samples, n_kernels*n_samples) representing the kernels
-
- Returns
- -------
- ndarray : the prediction associated to K
- """
- if self.loss=='squared-hinge':
- res = np.sign(np.dot(K, self.coefs_))
- res[res==0] = 1
- return res
- else:
- return np.dot(K, self.coefs_)
-
- def score(self, K, y):
- """ Returns the score prediction for the given data
-
- Parameters
- ----------
- K : ndarray
- matrix of observations
-
- y : ndarray
- the labels correspondings to K
-
- Returns
- -------
- The percentage of good classification for K
- """
- if self.loss=='squared-hinge':
- return np.sum(np.equal(self.predict(K), y))*100./len(y)
- else:
- print("Score not yet implemented for regression\n")
- return None
-
- def info(self, K, y):
- """ For test purpose
-
- Parameters
- ----------
- K : 2D-array
- kernels
-
- y : ndarray
- labels
- Returns
- -------
- A dict of informations
- """
- result = Bunch()
- n_samples, n_kernels = self.n_samples, self.n_kernels
- nulled_kernels = 0
- nulled_coefs_per_kernel = list()
-
- for i in self.coefs_.reshape(n_kernels, n_samples):
- if len(i[i!=0]) == 0:
- nulled_kernels = nulled_kernels + 1
- nulled_coefs_per_kernel.append(len(i[i==0]))
-
- result['score'] = self.score(K, y)
- result['norms'] = by_kernel_norm(self.coefs_, self.p, self.q,
- n_samples, n_kernels)
- result['nulled_coefs'] = len(self.coefs_[self.coefs_==0])
- result['nulled_kernels'] = nulled_kernels
- result['nulled_coefs_per_kernel'] = nulled_coefs_per_kernel
- result['objective_function'] = self.objective_function
- result['dual_objective_function'] = self.dual_objective_function
- result['gap'] = self.gap
- result['auc_score'] = roc_auc_score(y, self.predict(K))
- result['lambda_'] = self.lambda_
-
- return result
diff --git a/modules/hp.py b/modules/hp.py
deleted file mode 100644
index aadb7c2..0000000
--- a/modules/hp.py
+++ /dev/null
@@ -1,205 +0,0 @@
-def hp(t, C, T, ahp, Methods, Index, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz, LCurve = False, Arrhenius = False):
- """Plots heatmap in ahp = [ahp1, ahp2] axes
- ahp1 for T vs Emission rates and
- ahp2 for Arrhenius or DLTS plots
-
- Parameters:
- -------------
- t : array
- Time domain data from experiment
- C : 2D array (len(t), len(T))
- Contains transients for temperatures 1, 2, ...
- [F1(t), F2(t),...] from experiment
- T : array
- Temperature from experiment
- ahp : list of matplotlib axes [ahp1, ahp2]
- Axes to plot heatplot and Arrhenius
- Methods : list
- Method names used for plotting
- Index : int
- Index to plot specific slice of heatplot
- Reg_L1, Reg_L2 : floats
- Reg. parameters for FISTA(L1) and L2 regularization
- Reg_C, Reg_S : floats
- Reg. parameters for CONTIN and reSpect algorithms
- Bounds : list
- [lowerbound, upperbound] of s domain points
- Nz : int
- Value which is length of calculated vector f(s)
- LCurve : boolean
- Plot using L-curve criteria if True
- """
-
- import numpy as np
- import matplotlib.pyplot as plt
-
- from matplotlib import cm
- from matplotlib import gridspec
-
- from L1_FISTA import l1_fista
- from L2 import L2
- from L1L2 import L1L2
- from contin import Contin
- from reSpect import reSpect
- from regopt import regopt
-
- import sys
-
- def progressbar(i, iterations):
- """Prints simple progress bar"""
- i = i + 1
- sys.stdout.write("[%-20s] %d%% Building Heatmap" % ('#'*np.ceil(i*100/iterations*0.2).astype('int'),
- np.ceil(i*100/iterations))+'\r')
- sys.stdout.flush()
-
- cut = len(T)
- cus = Nz
-
- if len(Methods) > 1:
- print('Choose only one Method')
- Methods = Methods[0]
-
- XZ = []
- YZ = []
- ZZ = []
-
- for M in Methods:
- if M == 'FISTA':
- for i in range(0, cut):
- YZ.append(np.ones(cus)*T[i])
- TEMPE, TEMPX, a = l1_fista(t, C[i], Bounds, Nz, Reg_L1)
- XZ.append(TEMPE)
- ZZ.append(TEMPX)
-
- progressbar(i, cut)
-
- elif M == 'L2':
- for i in range(0, cut):
- YZ.append(np.ones(cus)*T[i])
- TEMPE, TEMPX, a = L2(t, C[i], Bounds, Nz, Reg_L2)
- XZ.append(TEMPE)
- ZZ.append(TEMPX)
-
- progressbar(i, cut)
-
- elif M == 'L1+L2':
- for i in range(0, cut):
- YZ.append(np.ones(cus)*T[i])
- TEMPE, TEMPX, a = L1L2(t, C[i], Bounds, Nz, Reg_L1, Reg_L2)
- XZ.append(TEMPE)
- ZZ.append(TEMPX)
-
- progressbar(i, cut)
-
- elif M == 'Contin':
- for i in range(0, cut):
- YZ.append(np.ones(cus)*T[i])
- if LCurve:
- ay = 0
- Reg_C = regopt(t, C[i], ay, Methods, Reg_L1, Reg_L2,
- Reg_C, Reg_S, Bounds, Nz, LCurve)
- TEMPE, TEMPX, a = Contin(t, C[i], Bounds, Nz, Reg_C)
- #print(YZ[-1][0], 'K; a = ', Reg_C)
- XZ.append(TEMPE)
- ZZ.append(TEMPX*TEMPE)
-
- progressbar(i, cut)
-
- elif M == 'reSpect':
- for i in range(0, cut):
- YZ.append(np.ones(cus)*T[i])
- if LCurve:
- ay = 0
- Reg_S = regopt(t, C[i], ay, Methods, Reg_L1, Reg_L2,
- Reg_C, Reg_S, Bounds, Nz, LCurve)
- TEMPE, TEMPX, a = reSpect(t, C[i], Bounds, Nz, Reg_S)
- #print(YZ[-1][0], 'K; a = ', Reg_C)
- XZ.append(TEMPE)
- ZZ.append(TEMPX)
-
- progressbar(i, cut)
-
-
-
- XZ = np.asarray(XZ)
- YZ = np.asarray(YZ)
- ZZ = np.asarray(ZZ)
-
- ahp1, ahp2 = ahp[0], ahp[1]
-
- if Methods[0] == 'reSpect':
- v = np.abs(np.average(ZZ[10:-10,20:-20]))*20
- vmin, vmax = 0, v/2
- cmap = cm.jet
- levels = np.linspace(vmin, vmax, 20)
- #print(v)
-
- elif Methods[0] == 'Contin':
- v = np.abs(np.average(ZZ[10:-10,20:-20]))*10
- vmin, vmax = 0, v
- cmap = cm.jet
- levels = 20
-
- elif Methods[0] == 'FISTA' or Methods[0] == 'L2' or Methods[0] == 'L1+L2':
- v = np.abs(np.average(ZZ))*50
- vmin, vmax = -v, v
- cmap = cm.bwr
- levels = np.linspace(vmin, vmax, 20)
-
- #extent = [np.log10(Bounds[0]), np.log10(Bounds[1]), (T[-1]), (T[0])]
-
- x, y = np.meshgrid(TEMPE, T)
-
- ahp1.set_xlabel(r'Emission rate $e_{n,p}$, s')
- ahp1.set_title(Methods[0])
- ahp1.set_ylabel('Temperature T, K')
- ahp1.grid(True)
- #normalize = plt.Normalize(vmin = -v, vmax = v)
-
- heatmap = ahp1.contourf(x, y, ZZ, levels = levels, cmap=cmap, corner_mask = False,
- vmin = vmin, vmax = vmax, extend = 'both')
- plt.colorbar(heatmap)
- ahp1.set_xscale('log')
-
- if Arrhenius:
-
- ahp2.ticklabel_format(style='sci', axis='x', scilimits=(0,0), useOffset = False)
-
- arrh = ahp2.contourf(1/y, np.log(x*y**-2), ZZ, levels = levels, cmap=cmap,
- vmin = vmin, vmax = vmax, extend = 'both')
- #ahp2.set_xscale('log')
- ahp2.set_xlabel('Temperature $1/T$, $K^-1$')
- ahp2.set_ylabel('$\ln(e\cdot T^-2)$')
-
- else:
- ahp2.set_xlabel('Temperature, K')
- ahp2.set_ylabel('LDLTS signal, arb. units')
- for i in range(int(len(TEMPE)*0.1), int(len(TEMPE)*0.8), 20):
- # ad.plot(T, ZZ[:, i], label=r'$\tau = %.3f s$'%(1/TEMPE[i]))
- ahp2.plot(T, ZZ[:, i]/np.amax(ZZ[:,i]), label=r'$\tau = %.3f s$'%(1/TEMPE[i]))
- #ahp2.set_yscale('log')
- #ahp2.set_ylim(1E-4, 10)
- ahp2.grid()
- ahp2.legend()
-
- plt.show()
- plt.tight_layout()
-
- ##save file
- #Table = []
- #Table.append([0] + (1/TEMPE).tolist())
- #for i in range(cut):
- # Table.append([T[i]] + (ZZ[i,:]).tolist())
-
- Table = []
- e = 1/TEMPE
- Table.append([0] + e.tolist())
- for i in range(cut):
- Table.append([T[i]] + (ZZ[i,:]).tolist())
-
-
- #print(Table)
- #Table = np.asarray(Table)
-
- np.savetxt('processed/NEW-FILE'%((t[1]-t[0])*1000) +'_1'+'.LDLTS',
- Table, delimiter='\t', fmt = '%4E')
diff --git a/modules/interface.py b/modules/interface.py
deleted file mode 100644
index e6d3a49..0000000
--- a/modules/interface.py
+++ /dev/null
@@ -1,142 +0,0 @@
-def interface(path):
- """Initiates and displays widgets from iPyWigets
- and sends data to demo() with interactive_output()
- """
-
- import numpy as np
- import ipywidgets as widgets
-
- from ipywidgets import interact, interactive, fixed, interact_manual, HBox, VBox, Label
-
- from read_file import read_file
- from demo import demo
-
- import warnings
- warnings.filterwarnings('ignore')
-
- interface.path = path
-
-
- t, C, T = read_file(path)
-
- if len(T.shape) != 0:
- cut = len(T) - 1
- else:
- cut = 1
- Index = widgets.IntSlider(
- value=0,
- min=0, # max exponent of base
- max=cut, # min exponent of base
- step=1, # exponent step
- description='')
-
- Methods = widgets.SelectMultiple(
- options = ['FISTA', 'L2', 'L1+L2', 'Contin', 'reSpect'],
- value = ['Contin'],
- #rows = 10,
- description = 'Methods:',
- disabled = False)
-
- Nz = widgets.IntText(
- value=100,
- description=r'Nf =',
- disabled=False)
-
- Reg_L1 = widgets.FloatLogSlider(
- value=1e-8,
- base=10,
- min=-10, # max exponent of base
- max=1, # min exponent of base
- step=0.2, # exponent step
- description=r'FISTA: λ1')
-
- Reg_L2 = widgets.FloatLogSlider(
- value=1e-8,
- base=10,
- min=-10, # max exponent of base
- max=1, # min exponent of base
- step=0.2, # exponent step
- description=r'L2: λ2')
-
- Reg_C = widgets.FloatLogSlider(
- value=1E-1,
- base=10,
- min=-8, # max exponent of base
- max=2, # min exponent of base
- step=0.2, # exponent step
- description=r'Contin: λC')
-
- Reg_S = widgets.FloatLogSlider(
- value=1E-2,
- base=10,
- min=-12, # max exponent of base
- max=2, # min exponent of base
- step=0.2, # exponent step
- description=r'reSpect: λS')
-
- Bounds = widgets.IntRangeSlider(
- value=[-2, 2],
- min=-5,
- max=5,
- step=1,
- description=r'10a to 10b:',
- disabled=False,
- continuous_update=False,
- orientation='horizontal',
- readout=True,
- readout_format='d')
-
- dt = widgets.BoundedFloatText(
- value=150,
- min=0,
- max=1000,
- step=1,
- description='Time step, ms',
- disabled=False)
-
- Plot = widgets.ToggleButton(
- value=True,
- description='Hide graphics?',
- disabled=False,
- button_style='success', # 'success', 'info', 'warning', 'danger' or ''
- tooltip='Hides graphics',
- icon='eye-slash')
-
- Residuals = widgets.ToggleButton(
- value=False,
- description='Compute L-curve?',
- disabled=False,
- button_style='info', # 'success', 'info', 'warning', 'danger' or ''
- tooltip='Plots L-curve to choose best value of regularization parameter of L2 reg. method',
- icon='calculator')
-
- LCurve = widgets.Checkbox(
- value = False,
- description = 'Use L-Curve optimal?',
- disabled = False)
-
- Arrhenius = widgets.Checkbox(
- value = False,
- description = 'Draw Arrhenius instead DLTS?',
- disabled = False)
-
- Heatplot = widgets.ToggleButton(
- value=False,
- description='Plot heatmap?',
- disabled=False,
- button_style='warning', # 'success', 'info', 'warning', 'danger' or ''
- tooltip='Plots heatmap of data from chosen file',
- icon='braille')
-
-
- left_box = VBox([Methods, Nz, dt])
- centre_box = VBox([Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds])
- right_box = VBox([LCurve, Arrhenius, Plot, Residuals, Heatplot])
- ui = widgets.HBox([left_box, centre_box, right_box])
- Slider = widgets.HBox([Label('Transient №'),Index])
- out = widgets.interactive_output(demo, {'Index':Index, 'Nz':Nz,
- 'Reg_L1':Reg_L1, 'Reg_L2':Reg_L2, 'Reg_C':Reg_C, 'Reg_S':Reg_S,
- 'Bounds':Bounds, 'dt':dt, 'Methods':Methods,
- 'Plot':Plot, 'LCurve':LCurve, 'Arrhenius':Arrhenius,
- 'Residuals':Residuals, 'Heatplot': Heatplot})
- display(ui, Slider, out)
diff --git a/modules/laplace.py b/modules/laplace.py
deleted file mode 100644
index f7ed015..0000000
--- a/modules/laplace.py
+++ /dev/null
@@ -1,59 +0,0 @@
-def laplace(t, F, Nz, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Methods):
-
- """ Initiates routines for chosen method
-
- Parameters:
- -------------
- t : array
- Time domain data from experiment
- F : array,
- Transient data from experiment F(t)
- Nz : int
- Number of points z to compute, must be smaller than len(F)
- Reg_L1, Reg_L2 : floats
- Reg. parameters for FISTA(L1) and L2 regularization
- Reg_C, Reg_S : floats
- Reg. parameters for CONTIN and reSpect algorithms
- Bounds : list
- [lowerbound, upperbound] of s domain points
- Methods : list
- Names of processing methods
-
- Returns:
- -------------
- data : list of [[s, f, F_restored, Method1], ...]
- Data list for processed data
-
- """
- import numpy as np
- from L1_FISTA import l1_fista
- from L2 import L2
- from L1L2 import L1L2
- from contin import Contin
- from reSpect import reSpect
-
- data = []
-
- for i in Methods:
- if i == 'FISTA':
- s, f, F_hat = l1_fista(t, F, Bounds, Nz, Reg_L1)
- data.append([s, f, F_hat, 'FISTA'])
-
- elif i == 'L2':
- s, f, F_hat = L2(t, F, Bounds, Nz, Reg_L2)
- data.append([s, f, F_hat, 'L2'])
-
- elif i == 'L1+L2':
- s, f, F_hat = L1L2(t, F, Bounds, Nz, Reg_L1, Reg_L2)
- data.append([s, f, F_hat, 'L1+L2'])
-
- elif i == 'Contin':
- s, f, F_hat = Contin(t, F, Bounds, Nz, Reg_C)
- data.append([s, f, F_hat, 'Contin'])
-
- elif i == 'reSpect':
- s, f, F_hat = reSpect(t, F, Bounds, Nz, Reg_S)
- data.append([s, f, F_hat, 'reSpect'])
-
- data = np.asarray(data)
- return data
diff --git a/modules/plot_data.py b/modules/plot_data.py
deleted file mode 100644
index a3466df..0000000
--- a/modules/plot_data.py
+++ /dev/null
@@ -1,89 +0,0 @@
-def plot_data(t, F, data, T, Index):
- """Gets data from demo() and plots it:
-
- Parameters:
- -------------
- t : array
- Time domain data from experiment
- F : array,
- Transient data from experiment F(t)
- data : list of [[s, f, F_restored, Method1], ...]
- Data list for processed data
- T : float
- Temperature value of certain transient
- Index : int
- Index of transient in initial dataset (not data)
-
- Returns:
- -------------
- ay : matplotlib axes
- Axes for L-Curve plotting
- [ahp1, ahp2] : list of matplotlib axes
- Axes for its Arrhenuis or DLTS plots in hp()
- """
-
- import numpy as np
- import matplotlib.pyplot as plt
-
- ## Plotting main plot f(s)
- fig = plt.figure(constrained_layout=True, figsize = (9.5,11))
- widths = [0.5, 0.5]
- heights = [0.3, 0.3, 0.4]
- spec = fig.add_gridspec(ncols=2, nrows=3, width_ratios=widths,
- height_ratios=heights)
-
-
- ax = fig.add_subplot(spec[0,:])
- ax.set_title(r'Temperature %.2f K'%T[Index])
- ax.set_ylabel(r'Amplitude, arb. units')
- ax.set_xlabel(r'Emission rate, $s^{-1}$')
- ax.set_xscale('log')
- ax.grid(True, which = "both", ls = "-")
- #print(data[:,2])
- for i, e in enumerate(data[:,-1]):
- if e == 'FISTA':
- ax.plot(data[i][0], data[i][1], 'r-', label = e)
- elif e == 'L2':
- ax.plot(data[i][0], data[i][1], 'b-', label = e)
- elif e == 'L1+L2':
- ax.plot(data[i][0], data[i][1], 'm-', label = e)
- elif e == 'Contin':
- ax.plot(data[i][0], data[i][1]*data[i][0], 'c-', label = e)
- elif e == 'reSpect':
- ax.plot(data[i][0], data[i][1], 'y-', label = e)
- ax.legend()
-
- # Axes for L-Curve
- ay = fig.add_subplot(spec[1, 0])
-
- # Plotting transients F(t)
- az = fig.add_subplot(spec[1, 1])
- az.set_ylabel(r'Transient , arb. units')
- az.set_xlabel(r'Time $t$, $s$')
- az.grid(True, which = "both", ls = "-")
- az.plot(t, F, 'ks-', label = 'Original')
- az.set_xscale('log')
- for i, e in enumerate(data[:,-1]):
- if e == 'FISTA':
- d = data[i][2]
- az.plot(t, d, 'ro-', label = e)
- elif e == 'L2':
- d = data[i][2][:-1] # last point sucks
- az.plot(t[:-1], d, 'b>-', label = e)
- elif e == 'L1+L2':
- d = data[i][2]
- az.plot(t, d, 'm*-', label = e)
- elif e == 'Contin':
- d = data[i][2]
- az.plot(t, d, 'cx-', label = e)
- elif e == 'reSpect':
- d = data[i][2]
- az.plot(t, d - d[-1] + F[-1], 'y+-', label = e)
- az.legend()
-
-
- plt.tight_layout()
-
- ahp1, ahp2 = fig.add_subplot(spec[2, 0]), fig.add_subplot(spec[2, 1])
-
- return ay, [ahp1, ahp2]
diff --git a/modules/reSpect.py b/modules/reSpect.py
deleted file mode 100644
index 9601916..0000000
--- a/modules/reSpect.py
+++ /dev/null
@@ -1,315 +0,0 @@
-import numpy as np
-
-from scipy.interpolate import interp1d
-from scipy.integrate import cumtrapz, quad
-from scipy.optimize import nnls, minimize, least_squares
-
-def Initialize_f(F, s, kernMat, *argv):
- """
- Computes initial guess for f and then call get_f()
- to return regularized solution:
-
- argmin_f = ||F - kernel(f)||2 + lambda * ||L f||2
-
- Parameters:
- ------------
- F : array
- Transient experimental data
- s : array
- Tau-domain points
- kernMat : matrix (len(s), len(F))
- Matrix of inverse Laplace transform
- F_b : float
- Baseline value, optional
-
- Returns:
- -----------
- flam : array
- Regularized inverse of Laplace transform
- F_b : float
- Baseline value
- """
-
- f = -5.0 * np.ones(len(s)) + np.sin(np.pi * s) # initial guess for f
- lam = 1e0
-
- if len(argv) > 0:
- F_b = argv[0]
- flam, F_b = get_f(lam, F, f, kernMat, F_b)
- return flam, F_b
- else:
- flam = get_f(lam, F, f, kernMat)
- return flam
-
-def get_f(lam, F, f, kernMat, *argv):
- """
- Solves following equation for f. Uses jacobianLM() with
- scipy.optimize.least_squares() solver:
-
- argmin_f = ||F - kernel(f)||2 + lambda * ||L f||2
-
- Parameters:
- -------------
- lambda : float
- Regularization parameter
- F : array
- Transient experimental data
- f : array
- Guessed f(s) for emission rates
- kernMat : matrix (len(f), len(F))
- Matrix of inverse Laplace transform
- F_b : float
- Baseline value,optional
-
- Returns:
- -------------
- flam : array
- Regularized solution
- F_b : float
- Baseline for solution
- """
-
- # send fplus = [f, F_b], on return unpack f and F_b
- if len(argv) > 0:
- fplus= np.append(f, argv[0])
- res_lsq = least_squares(residualLM, fplus, jac=jacobianLM, args=(lam, F, kernMat))
- return res_lsq.x[:-1], res_lsq.x[-1]
-
- # send normal f, and collect optimized f back
- else:
- res_lsq = least_squares(residualLM, f, jac=jacobianLM, args=(lam, F, kernMat))
- return res_lsq.x
-
-def residualLM(f, lam, F, kernMat):
- """
- Computes residuals for below equation
- and used with scipy.optimize.least_squares():
-
- argmin_f = ||F - kernel(f)||2 + lambda * ||L f||2
-
- Parameters:
- -------------
- f : array
- Solution for above equation f(s)
- lambda : float
- Regularization parameter
- F : array
- Experimental transient data
- kernMat : matrix (len(f), len(F))
- Matrix of inverse Laplace transform
- F_b : float
- Plateau value for F experimental data
-
- Returns:
- -----------
- r : array
- Residuals (||F - kernel(f)||2)''
- """
-
- n = kernMat.shape[0];
- ns = kernMat.shape[1];
- nl = ns - 2;
-
- r = np.zeros(n + nl);
-
- # if plateau then unfurl F_b
- if len(f) > ns:
- F_b = f[-1]
- f = f[:-1]
- r[0:n] = (1. - kernel_prestore(f, kernMat, F_b)/F) # same as |F - kernel(f)|
- else:
- r[0:n] = (1. - kernel_prestore(f, kernMat)/F) # same as |F - kernel(f)| w/o F_b
-
- r[n:n+nl] = np.sqrt(lam) * np.diff(f, n=2) # second derivative
-
- return r
-
-def jacobianLM(f, lam, F, kernMat):
- """
- Computes jacobian for scipy.optimize.least_squares()
-
- argmin_f = ||F - kernel(f)||2 + lambda * ||L f||2
-
- Parameters:
- -------------
- f : array
- Solution for above equation
- lam : float
- Regularization parameter
- F : array
- Experimental transient data
- kernMat : matrix (len(f), len(F))
- Matrix of inverse Laplace transform
-
- Returns:
- ------------
- Jr : matrix (len(f)*2 - 2, len(F) + 1)
- Contains Jr(i, j) = dr_i/df_j
- """
-
- n = kernMat.shape[0];
- ns = kernMat.shape[1];
- nl = ns - 2;
-
- # L is a ns*ns tridiagonal matrix with 1 -2 and 1 on its diagonal;
- L = np.diag(np.ones(ns-1), 1) + np.diag(np.ones(ns-1),-1) + np.diag(-2. * np.ones(ns))
- L = L[1:nl+1,:]
-
- # Furnish the Jacobian Jr (n+ns)*ns matrix
- Kmatrix = np.dot((1./F).reshape(n,1), np.ones((1,ns)));
-
- if len(f) > ns:
-
- F_b = f[-1]
- f = f[:-1]
-
- Jr = np.zeros((n + nl, ns+1))
-
- Jr[0:n, 0:ns] = -kernelD(f, kernMat) * Kmatrix;
- Jr[0:n, ns] = -1./F # column for dr_i/dF_b
-
- Jr[n:n+nl,0:ns] = np.sqrt(lam) * L;
- Jr[n:n+nl, ns] = np.zeros(nl) # column for dr_i/dF_b = 0
-
- else:
-
- Jr = np.zeros((n + nl, ns))
-
- Jr[0:n, 0:ns] = -kernelD(f, kernMat) * Kmatrix;
- Jr[n:n+nl,0:ns] = np.sqrt(lam) * L;
-
- return Jr
-
-def kernelD(f, kernMat):
- """
- Helper for jacobianLM() approximates dK_i/df_j = K * e(f_j)
-
- argmin_f = ||F - kernel(f)||2 + lambda * ||L f||2
-
- Parameters:
- -------------
- f : array
- Solution for above equation
- kernMat : matrix (len(f), len(F))
- Matrix of inverse Laplace transform
-
- Returns:
- -------------
- DK : matrix (len(f), len(F))
- Jacobian
- """
-
- n = kernMat.shape[0];
- ns = kernMat.shape[1];
-
- # A n*ns matrix with all the rows = f'
- fsuper = np.dot(np.ones((n,1)), np.exp(f).reshape(1, ns))
- DK = kernMat * fsuper
-
- return DK
-
-def getKernMat(s, t):
- """
- Mesh grid for s and t domain to construct
- kernel matrix
-
- Parameters:
- -------------
- s: array
- Tau domain points
- t: array
- Time domain points from experiment
-
- Returns:
- -------------
- np.exp(-T/S) * hsv : matrix (len(s), len(t))
- Matrix of inverse Laplace transform, where hsv
- trapezoidal coefficients
- """
- ns = len(s)
- hsv = np.zeros(ns);
- hsv[0] = 0.5 * np.log(s[1]/s[0])
- hsv[ns-1] = 0.5 * np.log(s[ns-1]/s[ns-2])
- hsv[1:ns-1] = 0.5 * (np.log(s[2:ns]) - np.log(s[0:ns-2]))
- S, T = np.meshgrid(s, t);
-
- return np.exp(-T/S) * hsv;
-
-def kernel_prestore(f, kernMat, *argv):
- """
- Function for prestoring kernel
-
- argmin_f = ||F - kernel(f)||2 + lambda * ||L f||2
-
- Parameters:
- -------------
- f : array
- Solution of above equation
- kernMat : matrix (len(f), len(F))
- Matrix of inverse Laplace transform
-
- Returns:
- ------------
- np.dot(kernMat, np.exp(f)) + F_b :
- Stores kernMat*(f)+ F_b
- """
-
- if len(argv) > 0:
- F_b = argv[0]
- else:
- F_b = 0.
-
- return np.dot(kernMat, np.exp(f)) + F_b
-
-
-def reSpect(t, F, bound, Nz, alpha):
- """
- Main routine to implement reSpect algorithm from [1].
-
- [1] Shanbhag, S. (2019)
-
- Parameters:
- --------------
- t : array
- Time domain points from experiment
- F : array
- Experimental transient F(t)
- bound : list
- [lowerbound, upperbound] of bounds for tau-domain points
- Nz : int
- Length of tau-domain array
- alpha : float
- Regularization parameter
-
- Returns:
- --------------
- 1/s[::-1] : array
- Tau-domain points
- np.exp(f)[::-1] : array
- Inverse Laplace transform of F(t)
- kernMat@np.exp(f)[::] : array
- Restored transient F_restored(t)
- """
-
- n = len(t)
- ns = Nz # discretization of 'tau'
-
- tmin = t[0];
- tmax = t[n-1];
-
- smin, smax = 1/bound[1], 1/bound[0] # s is tau domain points!
-
- hs = (smax/smin)**(1./(ns-1))
- s = smin * hs**np.arange(ns) # s here is tau domain points
-
- kernMat = getKernMat(s, t)
-
- fgs, F_b = Initialize_f(F, s, kernMat, np.min(F))
-
- alpha = alpha
-
- f, F_b = get_f(alpha, F, fgs, kernMat, F_b);
-
- K = kernel_prestore(f, kernMat, F_b);
-
- return 1/s[::-1], np.exp(f)[::-1], kernMat@np.exp(f)[::]
diff --git a/modules/read_file.py b/modules/read_file.py
deleted file mode 100644
index 14fdf4b..0000000
--- a/modules/read_file.py
+++ /dev/null
@@ -1,81 +0,0 @@
-def read_file(Path, dt=150, proc = True):
- """Returns data from file
-
- Parameters:
- --------------
- Path : str
- Path to file
- dt : float
- Step between time points in ms
- proc: boolean
- Process transient if True
-
- Returns:
- --------------
- time : array
- Time domain points in s
- C : array of [F1(time), F2(time), ...]
- Contains transients experimental data
- for range of temperatures
- T : array
- Contains experimental temperature points
- """
- import numpy as np
-
- def process(C, proc):
- """Returns processed transient C_p if proc is True"""
-
- def get_Baseline(C):
- """Returns baseline of transient C"""
- l = len(C)
- c1, c2, c3 = C[0], C[int(l/2)-1], C[l-1]
- return (c1*c3 - c2**2)/(c1+c3 - 2*c2)
-
- if proc:
- C_p = C
- for i, _C in enumerate(C):
- F = _C
- F = F/np.average(F[-1])
- if F[0] > F[-1]:
- F = F - min(F)
- else:
- F = F - max(F)
- F = np.abs(F)
- F = F + np.average(F)*2
- F = F - get_Baseline(F)*0
- C_p[i] = F
- return np.asarray(C_p)
-
- else:
- C = C/C[-1]
- return C + np.average(C)*2
-
- Path = str(Path)
-
- txt = np.genfromtxt(Path, delimiter='\t')
- if len(txt.shape) == 2:
- T = txt[:,0]
- cut = len(T)
-
- C = []
- time = []
- for i in range(0,cut):
- C.append(txt[i][3:-2])
-
- for i in range(0, len(C[0])):
- time.append(dt/1000*(i+1))
- else:
- T = txt[0]
- C = txt[1:]
- time = np.arange(dt, dt*len(C), dt)*1e-3
-
-
-
-
- C = np.asarray(C)
- C = process(C, proc)
- time = np.asarray(time)
- T = np.asarray(T)
- #print(time)
-
- return time, C, T
diff --git a/modules/regopt.py b/modules/regopt.py
deleted file mode 100644
index eab4412..0000000
--- a/modules/regopt.py
+++ /dev/null
@@ -1,189 +0,0 @@
-def regopt(t, F, ay, Methods, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz, LCurve = False):
- """
- Computes L-curve from residual and solution norm
- and derives optimal regularization parameter from
- its curvature plot. (max(k) -> a_opt)
-
- Parameters:
- -------------
- t : array
- Time domain data from experiment
- F : array,
- Transient data from experiment F(t)
- ay : matplotlib axes
- Axes for L-curve plotting
- Methods : list
- Names of processing methods
- Reg_L1, Reg_L2 : floats
- Reg. parameters for FISTA(L1) and L2 regularization
- Reg_C, Reg_S : floats
- Reg. parameters for CONTIN and reSpect algorithms
- Bounds : list
- [lowerbound, upperbound] of emission rates domain points
- Nz : int
- Number of points in emissior rates domain to compute,
- must be smaller than len(F)
- LCurve : boolean
- If True regopt() returns optimal regularization
- paremeter for chosen method
-
- Returns:
- -------------
- alpha[i] : float
- Optimal reg. parameter for chosen method
- using L-curve criteria
- """
- from laplace import laplace
- #from matplotlib.cm import jet
- import matplotlib.pyplot as plt
- import numpy as np
- from scipy.signal import savgol_filter
-
- import sys
-
- def progressbar(i, iterations):
- """Prints simple progress bar"""
- i = i + 1
- sys.stdout.write("[%-20s] %d%% Building L-Curve" % ('#'*np.ceil(i*100/iterations*0.2).astype('int'),
- np.ceil(i*100/iterations))+'\r')
- sys.stdout.flush()
-
- def curvature(x, y, a):
- """Returns curvature of line defined by:
-
- k = (x'*y''-x''*y')/((x')^2+(y')^2)^(3/2)
-
- Parameters:
- -------------
- x, y : arrays of x and y respectively
- a : array of reg parameters
-
- Returns:
- -------------
- k : array of curvature values
- """
- x = savgol_filter(x, 13, 1)
- y = savgol_filter(y, 13, 1)
- da = np.gradient(a)
- f_x = np.gradient(x)/da
- f_y = np.gradient(y)/da
- f_xx = np.gradient(f_x)/da
- f_yy = np.gradient(f_y)/da
-
- k = (f_x*f_yy - f_xx*f_y)/(f_x**2 + f_y**2)**(3/2)
- return savgol_filter(k, 5, 1)
- #return k
-
- res = [] # residuals norm ||Cf - F||2
- sol = [] # solution norm ||f||2
-
- alpha_L2 = 10**np.linspace(np.log10(Reg_L2) - 3, np.log10(Reg_L2) + 3, 40)
- alpha_C = 10**np.linspace(np.log10(Reg_C) - 3, np.log10(Reg_C) + 3, 40)
- alpha_S = 10**np.linspace(np.log10(Reg_S) - 3, np.log10(Reg_S) + 3, 40)
-
- if LCurve:
- alpha_C = 10**np.linspace(np.log10(Reg_C) - 3, np.log10(Reg_C) + 3, 40)
- alpha = alpha_C
-
- data = []
-
- Fx = F
-
- for i in Methods:
-
- if len(Methods) > 1:
- print('!!!Choose only one Method!!!')
- break
-
- if i == 'FISTA':
- break
-
- elif i == 'L2':
- for j, v in enumerate(alpha_L2):
- data = laplace(t, F, Nz, Reg_L1, v, Reg_C, Reg_S, Bounds, Methods)
- e, f, F_restored = data[0][0], data[0][1], data[0][2]
-
- res.append(np.linalg.norm(np.abs(Fx) - np.abs(F_restored), ord = 2)**2)
- sol.append(np.linalg.norm(f, ord = 2)**2)
- progressbar(j, len(alpha_L2))
- alpha = alpha_L2
- break
-
- elif i == 'L1+L2':
- break
-
- elif i == 'Contin':
- for j, v in enumerate(alpha_C):
- data = laplace(t, F, Nz, Reg_L1, Reg_L2, v, Reg_S, Bounds, Methods)
- e, f, F_restored = data[0][0], data[0][1], data[0][2]
-
- res.append(np.linalg.norm(np.abs(Fx) - np.abs(F_restored), ord = 2)**2)
- #sol.append(np.linalg.norm(f, ord = 2)**2)
- sol.append(np.linalg.norm(f*e, ord = 2)**2)
- progressbar(j, len(alpha_C))
- alpha = alpha_C
- break
-
- elif i == 'reSpect':
- for j, v in enumerate(alpha_S):
- data = laplace(t, F, Nz, Reg_L1, Reg_L2, Reg_C, v, Bounds, Methods)
- e, f, F_restored = data[0][0], data[0][1], data[0][2]
-
- res.append(np.linalg.norm(np.abs(Fx - Fx[-1]) - np.abs(F_restored - F_restored[-1]), ord = 2)**2)
- sol.append(np.linalg.norm(f, ord = 2)**2)
- progressbar(j, len(alpha_S))
- alpha = alpha_S
- break
-
- # Plotting L-curve and its normalized curvature
-
- if len(data) == 0:
- ay.annotate(text = 'Choose only one method \n Contin, reSpect or L2',
- xy = (0.5,0.5), ha="center", size = 16)
- plt.tight_layout()
- elif LCurve:
- k = curvature(np.log10(res), np.log10(sol), alpha)
- k_max = np.amax(k)
- if Methods[0] == 'reSpect':
- i = np.where(k == np.amax(k[1:-1]))
- else:
- i = np.where(k == np.amax(k[1:-1]))
- i = np.squeeze(i)
- return alpha[i]
- else:
- k = curvature(np.log10(res), np.log10(sol), alpha)
- k_max = np.amax(k)
- i = np.where(k == np.amax(k))
- if Methods[0] != 'reSpect':
- i = np.where(k == np.amax(k[1:-1]))
- i = np.squeeze(i)
-
- ay.plot(np.log10(res), np.log10(sol), 'k-', )
-
- ay.set_ylabel(r'Solution norm $\lg||x||^2_2$', c='k')
- ay.set_xlabel(r'Residual norm $\lg||\eta-Cx||^2_2$', c='k')
-
- ay_k = ay.twinx()
- ay_k_t = ay_k.twiny()
- ay_k_t.set_xscale('log')
- ay_k_t.plot(alpha, k/k[i], 'r-')
- ay_k.set_ylabel(r'Curvature, arb. units', c='r')
- ay_k.set_ylim(-0.1, 1.1)
- #ay_k.set_yscale('log')
- #ay_k.set_ylim(1e-3, 2.0)
- ay_k_t.set_xlabel(r'Reg. parameter $\lambda_{%.s}$'%(Methods[0]), c='r')
-
- ay_k_t.spines['top'].set_color('red')
- ay_k_t.spines['right'].set_color('red')
- ay_k_t.xaxis.label.set_color('red')
- ay_k_t.tick_params(axis='x', colors='red', which='both')
- ay_k.yaxis.label.set_color('red')
- ay_k.tick_params(axis='y', colors='red', which='both')
-
- # Draw maximal curvature point of L-curve
- ay_k_t.plot(alpha[i], k[i]/k[i], 'r*')
- ay.plot(np.log10(res[i]), np.log10(sol[i]), 'r*') #highlight optimal lambda
- ay.annotate(r"$\lambda_\mathrm{opt} =$ %.1e"%alpha[i], c = 'k',
- xy = (np.log10(res[i])*0.98, np.log10(sol[i])*0.98), ha = 'left')
-
- plt.tight_layout()
From 81f326c3345936973056c26e5b0e643c0c421133 Mon Sep 17 00:00:00 2001
From: Anton Vasilev <31480657+nocliper@users.noreply.github.com>
Date: Thu, 2 Nov 2023 16:13:42 +0300
Subject: [PATCH 2/3] for review
---
modules/L1L2.py | 67 ++++
modules/L1_FISTA.py | 21 ++
modules/L2.py | 67 ++++
modules/contin.py | 131 ++++++++
modules/demo.py | 63 ++++
modules/filebrowser.py | 53 ++++
modules/fista.py | 685 +++++++++++++++++++++++++++++++++++++++++
modules/hp.py | 205 ++++++++++++
modules/interface.py | 142 +++++++++
modules/laplace.py | 59 ++++
modules/plot_data.py | 89 ++++++
modules/reSpect.py | 315 +++++++++++++++++++
modules/read_file.py | 81 +++++
modules/regopt.py | 189 ++++++++++++
14 files changed, 2167 insertions(+)
create mode 100644 modules/L1L2.py
create mode 100644 modules/L1_FISTA.py
create mode 100644 modules/L2.py
create mode 100644 modules/contin.py
create mode 100644 modules/demo.py
create mode 100644 modules/filebrowser.py
create mode 100644 modules/fista.py
create mode 100644 modules/hp.py
create mode 100644 modules/interface.py
create mode 100644 modules/laplace.py
create mode 100644 modules/plot_data.py
create mode 100644 modules/reSpect.py
create mode 100644 modules/read_file.py
create mode 100644 modules/regopt.py
diff --git a/modules/L1L2.py b/modules/L1L2.py
new file mode 100644
index 0000000..25167f0
--- /dev/null
+++ b/modules/L1L2.py
@@ -0,0 +1,67 @@
+""" Module to implement L1 and/or L2 regularization with
+SciPy linear_models module
+"""
+
+def L1L2(t, F, bound, Nz, alpha1, alpha2, iterations = 10000):
+ """
+ Returns solution using mixed L1 and/or L2 regularization
+ with simple gradient descent
+
+ F(t) = ∫f(s)*exp(-s*t)ds
+
+ or
+
+ min = ||C*f - F||2 + alpha1*||I*f||1 + alpha2*||I*f||2
+
+ Parameters:
+ ------------
+ t : array
+ Time domain data from experiment
+ F : array,
+ Transient data from experiment F(t)
+ bound : list
+ [lowerbound, upperbound] of s domain points
+ Nz : int
+ Number of points z to compute, must be smaller than len(F)
+ alpha1, alpha2 : floats
+ Regularization parameters for L1 and L2 regularizers
+ iterations : int
+ Maximum number of iterations. Optional
+
+
+ Returns:
+ ------------
+ s : array
+ Emission rates domain points (evenly spaced on log scale)
+ f : array
+ Inverse Laplace transform f(s)
+ F_restored : array
+ Reconstructed transient from C@f + intercept
+ """
+
+ import numpy as np
+ from scipy.sparse import diags
+ from sklearn.linear_model import ElasticNet
+
+ # set up grid points (# = Nz)
+ h = np.log(bound[1]/bound[0])/(Nz - 1) # equally spaced on logscale
+ s = bound[0]*np.exp(np.arange(Nz)*h) # z (Nz by 1)
+
+ # construct C matrix from [1]
+ s_mesh, t_mesh = np.meshgrid(s, t)
+ C = np.exp(-t_mesh*s_mesh)
+ C[:, 0] /= 2.
+ C[:, -1] /= 2.
+ C *= h
+
+ alpha = alpha1 + alpha2
+ l1_ratio = alpha1/alpha
+
+ model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, tol = 1e-12,
+ fit_intercept = True, max_iter = iterations)
+ model.fit(C, F)
+
+ f = model.coef_
+
+ F_restored = C@f + model.intercept_
+ return s, f, F_restored
\ No newline at end of file
diff --git a/modules/L1_FISTA.py b/modules/L1_FISTA.py
new file mode 100644
index 0000000..5c47079
--- /dev/null
+++ b/modules/L1_FISTA.py
@@ -0,0 +1,21 @@
+from fista import Fista
+import numpy as np
+
+def l1_fista(t, F, bound, Nz, alpha, iterations = 50000, penalty = 'l11'):
+
+ "Function to implement FISTA algorithm"
+
+ h = np.log(bound[1]/bound[0])/(Nz - 1) # equally spaced on logscale
+ z = bound[0]*np.exp(np.arange(Nz)*h) # z (Nz by 1)
+
+ z_mesh, t_mesh = np.meshgrid(z, t)
+ C = np.exp(-t_mesh*z_mesh) # specify integral kernel
+ C[:, 0] /= 2.
+ C[:, -1] /= 2.
+ C *= h
+
+ fista = Fista(loss='least-square', penalty = penalty, lambda_=alpha, n_iter = iterations)
+ fista.fit(C, F)
+
+
+ return z, fista.coefs_, fista.predict(C)
diff --git a/modules/L2.py b/modules/L2.py
new file mode 100644
index 0000000..fc75be7
--- /dev/null
+++ b/modules/L2.py
@@ -0,0 +1,67 @@
+"""Module to implement routines of numerical inverse
+Laplace transform using L2 regularization algorithm
+"""
+
+import numpy as np
+from scipy.sparse import diags
+
+def L2(t, F, bound, Nz, alpha):
+ """
+ Returns solution for problem imposing L2 regularization
+
+ F(t) = ∫f(s)*exp(-s*t)ds
+
+ or
+
+ min = ||C*f - F||2 + alpha*||I*f||2
+
+
+ Parameters:
+ ------------
+ t : array
+ Time domain data from experiment
+ F : array,
+ Transient data from experiment F(t)
+ bound : list
+ [lowerbound, upperbound] of s domain points
+ Nz : int
+ Number of points z to compute, must be smaller than len(F)
+ alpha : float
+ Regularization parameter for the L2 regularizer
+ iterations : int
+ Maximum number of iterations. Optional
+
+
+ Returns:
+ ------------
+ s : array
+ Emission rates domain points (evenly spaced on log scale)
+ f : array
+ Inverse Laplace transform f(s)
+ F_restored : array
+ Reconstructed transient from C@f + intercept
+ """
+
+ # set up grid points (# = Nz)
+ h = np.log(bound[1]/bound[0])/(Nz - 1) # equally spaced on logscale
+ s = bound[0]*np.exp(np.arange(Nz)*h) # z (Nz by 1)
+
+ # construct C matrix from [1]
+ s_mesh, t_mesh = np.meshgrid(s, t)
+ C = np.exp(-t_mesh*s_mesh)
+ C[:, 0] /= 2.
+ C[:, -1] /= 2.
+ C *= h
+
+ l2 = alpha
+
+ data = [-2*np.ones(Nz), 1*np.ones(Nz), 1*np.ones(Nz)]
+ positions = [-1, -2, 0]
+ I = diags(data, positions, (Nz+2, Nz)).toarray()
+ #I = np.identity(Nz)
+
+ f = np.linalg.solve(l2*np.dot(I.T,I) + np.dot(C.T,C), np.dot(C.T,F))
+
+ F_restored = C@f
+
+ return s, f, F_restored#, res_norm, sol_norm
diff --git a/modules/contin.py b/modules/contin.py
new file mode 100644
index 0000000..24ee1ee
--- /dev/null
+++ b/modules/contin.py
@@ -0,0 +1,131 @@
+"""Module to implement routines of numerical inverse
+Laplace transform using Contin algorithm [1]
+
+[1] Provencher, S. (1982)
+"""
+
+from __future__ import division
+import numpy as np
+
+def Contin(t, F, bound, Nz, alpha):
+ """
+ Function returns processed data f(z) from the equation
+
+ F(t) = ∫f(z)*exp(-z*t)dz
+
+ Parameters:
+ --------------
+ t : array
+ Time domain data from experiment
+ F : array,
+ Transient data from experiment F(t)
+ bound : list
+ [lowerbound, upperbound] of z domain points
+ Nz : int
+ Number of points z to compute, must be smaller than len(F)
+ alpha : float
+ Regularization parameter
+
+ Returns:
+ --------------
+ z : array
+ Emission rates domain points (evenly spaced on log scale)
+ f : array
+ Inverse Laplace transform f(z)
+ F_restored : array
+ Reconstructed transient from C@f(z)
+ """
+
+
+ # set up grid points (# = Nz)
+ h = np.log(bound[1]/bound[0])/(Nz - 1) # equally spaced on logscale
+ z = bound[0]*np.exp(np.arange(Nz)*h) # z (Nz by 1)
+
+ # construct C matrix from [1]
+ z_mesh, t_mesh = np.meshgrid(z, t)
+ C = np.exp(-t_mesh*z_mesh)
+ C[:, 0] /= 2.
+ C[:, -1] /= 2.
+ C *= h
+
+ # construct regularization matrix R to impose gaussian-like peaks in f(z)
+ # R - tridiagonal matrix (1,-2,1)
+ Nreg = Nz + 2
+ R = np.zeros([Nreg, Nz])
+ R[0, 0] = 1.
+ R[-1, -1] = 1.
+ R[1:-1, :] = -2*np.diag(np.ones(Nz)) + np.diag(np.ones(Nz-1), 1) \
+ + np.diag(np.ones(Nz-1), -1)
+
+ #R = U*H*Z^T
+ U, H, Z = np.linalg.svd(R, full_matrices=False) # H diagonal
+ Z = Z.T
+ H = np.diag(H)
+
+ #C*Z*inv(H) = Q*S*W^T
+ Hinv = np.diag(1.0/np.diag(H))
+ Q, S, W = np.linalg.svd(C.dot(Z).dot(Hinv), full_matrices=False) # S diag
+ W = W.T
+ S = np.diag(S)
+
+ # construct GammaTilde & Stilde
+ # ||GammaTilde - Stilde*f5||^2 = ||Xi||^2
+ Gamma = np.dot(Q.T, F)
+ Sdiag = np.diag(S)
+ Salpha = np.sqrt(Sdiag**2 + alpha**2)
+ GammaTilde = Gamma*Sdiag/Salpha
+ Stilde = np.diag(Salpha)
+
+ # construct LDP matrices G = Z*inv(H)*W*inv(Stilde), B = -G*GammaTilde
+ # LDP: ||Xi||^2 = min, with constraint G*Xi >= B
+ Stilde_inv = np.diag(1.0/np.diag(Stilde))
+ G = Z @ Hinv @ W @ Stilde_inv
+ B = -G @ GammaTilde
+
+ # call LDP solver
+ Xi = ldp(G, B)
+
+ # final solution
+ zf = np.dot(G, Xi + GammaTilde)
+ f = zf/z
+
+ F_restored = C@zf
+
+ return z, f, F_restored
+
+
+def ldp(G, h):
+ """
+ Helper for Contin(); solves the least distance programming (LDP) problem via NNLS [1]
+
+ [1] - Lawson and Hanson’s (1974)
+ Parameters:
+ -------------
+ G : matrix
+ Z*inv(H)*W*inv(Stilde)
+ h : array
+ -G*GammaTilde
+
+ Returns:
+ -------------
+ x : array
+ Solution of the LDP problem: min ||x|| subject to G@x >= h
+ """
+
+ from scipy.optimize import nnls
+
+ m, n = G.shape
+ A = np.concatenate((G.T, h.reshape(1, m)))
+ b = np.zeros(n+1)
+ b[n] = 1.
+
+ # Solving for argmin_x || Ax - b ||_2
+ x, resnorm = nnls(A, b)
+
+ r = A@x - b
+
+ if np.linalg.norm(r) == 0:
+ print('\n No solution found, try different input!')
+ else:
+ x = -r[0:-1]/r[-1]
+ return x
diff --git a/modules/demo.py b/modules/demo.py
new file mode 100644
index 0000000..f11bfd9
--- /dev/null
+++ b/modules/demo.py
@@ -0,0 +1,63 @@
+def demo(Index, Nz, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, dt, Methods, Plot, Residuals, LCurve, Arrhenius, Heatplot):
+ """Gets data from widgets initialized with interface(),
+ calls laplace() to process data and calls plot_data(), hp()
+ to plot results
+
+ Parameters:
+ -------------
+ Index : int
+ Index of transient in dataset
+ Nz : int
+ Value which is length of calculated vector
+ Reg_L1, Reg_L2 : floats
+ Reg. parameters for FISTA(L1) and L2 regularization
+ Reg_C, Reg_S : floats
+ Reg. parameters for CONTIN and reSpect algorithms
+ Bounds : list
+ [lowerbound, upperbound ] bounds of emission rates domain points
+ dt : int
+ Time step of transient data points in ms
+ Methods : list
+ Methods to process data
+ Plot : boolean
+ Calls plot_data() if True
+ Residuals : boolean
+ Calls regopt() and plots L-curve to control
+ regularization if True
+ LCurve : boolean,
+ Automatically picks optimal reg. parameter from
+ L-curve if True
+ Heatplot : boolean
+ Plots heatplot for all dataset and saves data
+ in .LDLTS if True
+ """
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ from read_file import read_file
+ from laplace import laplace
+ from plot_data import plot_data
+ from hp import hp
+ from read_file import read_file
+ from regopt import regopt
+ from interface import interface
+
+ Bounds = 10.0**np.asarray(Bounds)
+
+ t, C, T = read_file(interface.path, dt, proc = True)# time, transients, temperatures
+
+ data = laplace(t, C[Index], Nz, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Methods)
+ #print(data)
+
+ if Plot:
+ ay, aph = plot_data(t, C[Index], data, T, Index)
+
+ if Heatplot:
+ hp(t, C, T, aph, Methods, Index, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz, LCurve, Arrhenius)
+
+ if Residuals:
+ regopt(t, C[Index], ay, Methods, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz)
+
+ plt.show()
+
\ No newline at end of file
diff --git a/modules/filebrowser.py b/modules/filebrowser.py
new file mode 100644
index 0000000..860804a
--- /dev/null
+++ b/modules/filebrowser.py
@@ -0,0 +1,53 @@
+import os
+import ipywidgets as widgets
+
+l = widgets.Layout(width='50%')
+
+class FileBrowser(object):
+ def __init__(self):
+ self.path = os.getcwd()
+ self._update_files()
+
+ def _update_files(self):
+ self.files = list()
+ self.folders = list()
+ if(os.path.isdir(self.path)):
+ for f in os.listdir(self.path):
+ ff = os.path.join(self.path, f)
+ if os.path.isdir(ff):
+ self.folders.append(f)
+ else:
+ self.files.append(f)
+
+ def widget(self):
+ box = widgets.VBox()
+ self._update(box)
+ return box
+
+ def _update(self, box):
+
+ def on_click(b):
+ if b.description == '..':
+ self.path = os.path.split(self.path)[0]
+ else:
+ self.path = os.path.join(self.path, b.description)
+ self._update_files()
+ self._update(box)
+
+ buttons = []
+ if self.files or self.folders:
+ button = widgets.Button(layout = l, description='..', button_style='primary')
+ button.on_click(on_click)
+ buttons.append(button)
+ for f in self.folders:
+ if f[0] != '.' and f[:2] != '__' and f != 'processed' and f != 'modules':
+ button = widgets.Button(layout = l, description=f, button_style='info')
+ button.on_click(on_click)
+ buttons.append(button)
+ for f in self.files:
+ if f[0] != '.' and f[-5:] == '.DLTS' or f[-5:] == '.PERS':
+ button = widgets.Button(layout = l, description=f, button_style='success')
+ button.on_click(on_click)
+ buttons.append(button)
+
+ box.children = tuple(
+ [widgets.HTML("<h2>%s</h2>" % (self.path,))] + buttons)
diff --git a/modules/fista.py b/modules/fista.py
new file mode 100644
index 0000000..6d616c3
--- /dev/null
+++ b/modules/fista.py
@@ -0,0 +1,685 @@
+"""
+Module implementing the FISTA algorithm
+"""
+from __future__ import division, print_function
+
+__author__ = 'Jean KOSSAIFI'
+
+import numpy as np
+import sys
+from scipy.linalg import norm
+from math import sqrt
+from sklearn.base import BaseEstimator
+#from sklearn.datasets.base import Bunch
+from sklearn.metrics import roc_auc_score
+from hashlib import sha1
+
+
+def mixed_norm(coefs, p, q=None, n_samples=None, n_kernels=None):
+ """ Computes the (p, q) mixed norm of the vector coefs
+
+ Parameters
+ ----------
+ coefs : ndarray
+ a vector indexed by (l, m)
+ with l in range(0, n_kernels)
+ and m in range(0, n_samples)
+
+ p : int or np.inf
+
+ q : int or np.inf
+
+ n_samples : int, optional
+ number of elements in each kernel
+ default is None
+
+ n_kernels : int, optional
+ number of kernels
+ default is None
+
+ Returns
+ -------
+ float
+ """
+ if q is None or p == q:
+ return norm(coefs, p)
+ else:
+ return norm([norm(i, p) for i in coefs.reshape(
+ n_kernels, n_samples)], q)
+
+
+def dual_mixed_norm(coefs, n_samples, n_kernels, norm_):
+ """ Returns the value of the dual mixed norm
+
+ Parameters
+ ----------
+ coefs : ndarray
+ a vector indexed by (l, m)
+ with l in range(0, n_kernels)
+ and m in range(0, n_samples)
+
+ n_samples : int, optional
+ number of elements in each kernel
+ default is None
+
+ n_kernels : int, optional
+ number of kernels
+ default is None
+
+ norm_ : {'l11', 'l12', 'l21', 'l22'}
+ the dual mixed norm we want to compute
+
+ Returns
+ -------
+ float
+ """
+ if norm_ == 'l11':
+ res = norm(coefs, np.inf)
+ elif norm_ == 'l12':
+ res = mixed_norm(coefs, np.inf, 2, n_samples, n_kernels)
+ elif norm_ == 'l21':
+ res = mixed_norm(coefs, 2, np.inf, n_samples, n_kernels)
+ else:
+ res = norm(coefs, 2)
+ return res
+
+
+def by_kernel_norm(coefs, p, q, n_samples, n_kernels):
+ """ Computes the (p, q) norm of coefs for each kernel
+
+ Parameters
+ ----------
+ coefs : ndarray
+ a vector indexed by (l, m)
+ with l in range(0, n_kernels)
+ and m in range(0, n_samples)
+
+ p : int or np.inf
+
+ q : int or np.inf
+
+ n_samples : int, optional
+ number of elements in each kernel
+ default is None
+
+ n_kernels : int, optional
+ number of kernels
+ default is None
+
+ Returns
+ -------
+ A list of the norms of the sub vectors associated to each kernel
+ """
+ return [mixed_norm(i, p, q, n_samples, 1)
+ for i in coefs.reshape(n_kernels, n_samples)]
+
+
+def prox_l11(u, lambda_):
+ """ Proximity operator for l(1, 1, 2) norm
+
+
+
+ :math:`\\hat{\\alpha}_{l,m} = sign(u_{l,m})\\left||u_{l,m}| - \\lambda \\right|_+`
+
+ Parameters
+ ----------
+ u : ndarray
+ The vector (of the n-dimensional space) on which we want
+ to compute the proximal operator
+
+ lambda_ : float
+ regularisation parameter
+
+ Returns
+ -------
+ ndarray : the vector corresponding to the application of the
+ proximity operator to u
+
+ """
+ return np.sign(u) * np.maximum(np.abs(u) - lambda_, 0.)
+
+def prox_l22(u, lambda_):
+ """ proximity operator l(2, 2, 2) norm
+
+ Parameters
+ ----------
+
+ u : ndarray
+ The vector (of the n-dimensional space) on which we want to compute the proximal operator
+
+ lambda_ : float
+ regularisation parameter
+
+ Returns
+ -------
+
+ ndarray : the vector corresponding to the application of the proximity operator to u
+
+ Notes
+ -----
+
+ :math:`\\hat{\\alpha}_{l,m} = \\frac{1}{1 + \\lambda} \\, u_{l,m}`
+
+ """
+ return 1./(1.+lambda_)*u
+
+def prox_l21_1(u, l, n_samples, n_kernels):
+ """ Proximity operator l(2, 1, 1) norm
+
+ Parameters
+ ----------
+ u : ndarray
+ The vector (of the n-dimensional space) on which we want to compute the proximal operator
+
+ lambda_ : float
+ regularisation parameter
+
+ n_samples : int, optional
+ number of elements in each kernel
+ default is None
+
+ n_kernels : int, optional
+ number of kernels
+ default is None
+
+ Returns
+ -------
+ ndarray : the vector corresponding to the application of the proximity operator to u
+
+
+ Notes
+ -----
+
+ .. math::
+
+ \hat{\alpha}_{l,m} = u_{l,m} \left| 1 - \frac{\lambda}{\|u_{l \bullet}\|_{2}} \right|_+\
+
+ where l is in range(0, n_samples) and m is in range(0, n_kernels)
+ so :math:`u_{l\\bullet}` = [u(l, m) for m in n_kernels]
+
+ """
+ return (u.reshape(n_kernels, n_samples) *\
+ [max(1. - l/norm(u[np.arange(n_kernels)*n_samples+i], 2), 0.)
+ for i in range(n_samples)]).reshape(-1)
+
+
+def prox_l21(u, l, n_samples, n_kernels):
+ """ proximity operator l(2, 1, 2) norm
+
+ Parameters
+ ----------
+ u : ndarray
+ The vector (of the n-dimensional space) on which we want to compute the proximal operator
+
+ lambda_ : float
+ regularisation parameter
+
+ n_samples : int, optional
+ number of elements in each kernel
+ default is None
+
+ n_kernels : int, optional
+ number of kernels
+ default is None
+
+
+ Returns
+ -------
+ ndarray : the vector corresponding to the application of the proximity operator to u
+
+ Notes
+ -----
+
+ :math:`\\hat{\\alpha}_{l,m} = u_{l,m} \\left| 1 - \\frac{ \\lambda}{ \\|u_{l \\bullet }\\|_{2}} \\right|_+`
+
+ where l is in range(0, n_kernels) and m is in range(0, n_samples)
+ so :math:`u_{l \\bullet }` = [u(l, m) for m in n_samples]
+
+ """
+ for i in u.reshape(n_kernels, n_samples):
+ n = norm(i, 2)
+ if n==0 or n==np.Inf:
+ i[:] = 0
+ else:
+ i[:] *= max(1. - l/n, 0.)
+ # !! If you do just i *= , u isn't modified
+ # The slice is needed here so that the array can be modified
+ return u
+
+
+def prox_l12(u, l, n_samples, n_kernels):
+ """ proximity operator for l(1, 2, 2) norm
+
+ Parameters
+ ----------
+ u : ndarray
+ The vector (of the n-dimensional space) on which we want to compute the proximal operator
+
+ lambda_ : float
+ regularization parameter
+
+ n_samples : int, optional
+ number of elements in each kernel
+ default is None
+
+ n_kernels : int, optional
+ number of kernels
+ default is None
+
+ Returns
+ -------
+ ndarray : the vector corresponding to the application of the proximity operator to u
+
+
+ Notes
+ -----
+
+ :math:`\\hat{\\alpha}_{l,m} = sign(u_{l,m})\\left||u_{l,m}| - \\frac{\\lambda \\sum\\limits_{m_l=1}^{M_l} u2_{l,m_l}}{(1+\\lambda M_l) \\|u_{l \\bullet }\\|_{2}} \\right|_+`
+
+ where :math:`u2_{l,m_l}` denotes the :math:`|u_{l,m_l}|`
+ ordered by descending order for fixed :math:`l`, and the
+ quantity :math:`M_l` is the number computed in compute_M
+
+ """
+ for i in u.reshape(n_kernels, n_samples):
+ Ml, sum_Ml = compute_M(i, l, n_samples)
+ # i[:] so that u is really modified
+ n = norm(i, 2)
+ if n == 0 or n == np.Inf:
+ i[:] = 0
+ else:
+ i[:] = np.sign(i)*np.maximum(
+ np.abs(i)-(l*sum_Ml)/((1.+l*Ml)*n), 0.)
+ return u
+
+def compute_M(u, lambda_, n_samples):
+ """
+ Parameters
+ ----------
+ u : ndarray
+ ndarray of size (n_samples * n_samples) representing a subvector of K,
+ ie the samples for a single kernel
+
+ lambda_ : int
+
+ n_samples : int
+ number of elements in each kernel
+ ie number of elements of u
+
+ Notes
+ -----
+
+ :math:`M_l` is the number such that
+
+ :math:`u2_{l,M_l+1} \\leq \\lambda \\sum_{m_l=1}^{M_l+1} \\left( u2_{l,m_l} - u2_{l,M_l+1}\\right)`
+
+ and
+
+
+ :math:`u2_{l,M_l} > \\lambda\\sum_{m_l=1}^{M_l} \\left( u2_{l,m_l} - u2_{k,M_l}\\right)`
+
+ Detailed explication
+
+ let u denotes |u(l)|, the vector associated with the kernel l, ordered by descending order
+ Ml is the integer such that
+ u(Ml + 1) <= l * sum(k=1..Ml + 1) (u(k) - u(Ml + 1)) (S1)
+ and
+ u(Ml) > l * sum(k=1..Ml) (u(k) - u(Ml)) (S2)
+ Note that in that definition, Ml is in [1..Ml]
+ In python, while Ml is in [1..(Ml-1)], indices will be in [0..(Ml-1)], so we must take care of indices.
+ That's why, we consider Ml is in [0..(Ml-1)] and, at the end, we add 1 to the result
+
+ Detailed example
+
+ if u(l) = [0 1 2 3] corresponds to the vector associated with a kernel
+ then u = |u(l)| ordered by descending order ie u = [3 2 1 0]
+
+ Then u = [3 2 1 0]
+ let l = 1
+ Ml is in {0, 1, 2} (not 3 because we also consider Ml+1)
+ # Note : in fact Ml is in {1, 2, 3} but it is more convenient
+ # to consider it is in {0, 1, 2} as indexing in python starts at 0
+ # We just have to add 1 to the final result
+
+ if Ml = 0 then S1 = 1 and S2 = 0
+ if Ml = 1 then S1 = 3 and S2 = 1
+ if Ml = 2 then S1 = 6 and S2 = 3
+
+ if Ml = 0 then u(Ml+1)=u(1)=2 > l*... =1 (S1 is not verified)
+ and u(Ml)=u(0)=3 > l*... =0 (S2 is verified)
+
+ if Ml = 1 then u(Ml+1)=u(2)=1 <= l*... =3 (S1 is verified)
+ and u(Ml)=u(1)=2 > l*... =1 (S2 is verified)
+
+ if Ml = 2 then u(Ml+1)=u(3)=0 <= l*... =6 (S1 is verified)
+ but u(Ml)=u(2)=1 <= l*... =3 (S1 is not verified)
+
+ Conclusion : Ml = 1 + 1 !!
+ Ml = 2 because in python, indexing starts at 0, so Ml +1
+
+ """
+ u = np.sort(np.abs(u))[::-1]
+ S1 = u[1:] - lambda_*(np.cumsum(u)[:-1] - (np.arange(n_samples-1)+1)*u[1:])
+ S2 = u[:-1] - lambda_*(np.cumsum(u)[:-1] - (np.arange(n_samples-1)+1)*u[:-1])
+ Ml = np.argmax((S1<=0.) & (S2>0.)) + 1
+
+ return Ml, np.sum(u[:Ml]) # u[:Ml] = u[0, 1, ..., Ml-1] !!
+
+
+def hinge_step(y, K, Z):
+ """
+ Returns the point at which we apply gradient descent
+
+ parameters
+ ----------
+ y : np-array
+ the labels vector
+
+ K : 2D np-array
+ the concatenation of all the kernels, of shape
+ n_samples, n_kernels*n_samples
+
+ Z : a linear combination of the last two coefficient vectors
+
+ returns
+ -------
+ res : np-array of shape n_samples*n_kernels
+ a point of the space where we will apply gradient descent
+ """
+ return np.dot(K.transpose(), np.maximum(1 - np.dot(K, Z), 0))
+
+def least_square_step(y, K, Z):
+ """
+ Returns the point at which we apply gradient descent
+
+ parameters
+ ----------
+ y : np-array
+ the labels vector
+
+ K : 2D np-array
+ the concatenation of all the kernels, of shape
+ n_samples, n_kernels*n_samples
+
+ Z : a linear combination of the last two coefficient vectors
+
+ returns
+ -------
+ res : np-array of shape n_samples*n_kernels
+ a point of the space where we will apply gradient descent
+ """
+ return np.dot(K.transpose(), y - np.dot(K,Z))
+
+
+def _load_Lipschitz_constant(K):
+ """ Loads the Lipschitz constant and computes it if not already saved
+
+ Parameters
+ ----------
+ K : 2D-ndarray
+ The matrix of which we want to compute the Lipschitz constant
+
+ Returns
+ -------
+ float
+
+ Notes
+ -----
+ Lipschitz constant is just a number < 2/norm(np.dot(K, K.T), 2)
+
+ The constant is stored in a npy hidden file, in the current directory.
+ The filename is the sha1 hash of the ndarray
+
+ """
+ try:
+ mu = np.load('./.%s.npy' % sha1(K).hexdigest())
+ except:
+ mu = 1/norm(np.dot(K, K.transpose()), 2)
+ np.save('./.%s.npy' % sha1(K).hexdigest(), mu)
+ return mu
+
+
+class Fista(BaseEstimator):
+ """
+
+ Fast iterative shrinkage/thresholding Algorithm
+
+ Parameters
+ ----------
+
+ lambda_ : float, optional
+ regularization parameter
+ default is 0.5
+
+ loss : {'squared-hinge', 'least-square'}, optional
+ the loss function to use
+ default is 'squared-hinge'
+
+ penalty : {'l11', 'l22', 'l12', 'l21'}, optional
+ norm to use as penalty
+ default is l11
+
+ n_iter : int, optional
+ number of iterations
+ default is 1000
+
+ recompute_Lipschitz_constant : bool, optional
+ if True, the Lipschitz constant is recomputed every time
+ if False, it is stored based on its sha1 hash
+ default is False
+
+ """
+
+ def __init__(self, lambda_=0.5, loss='squared-hinge', penalty='l11', n_iter=1000, recompute_Lipschitz_constant=False):
+ self.n_iter = n_iter
+ self.lambda_ = lambda_
+ self.loss = loss
+ self.penalty = penalty
+ self.p = int(penalty[1])
+ self.q = int(penalty[2])
+ self.recompute_Lipschitz_constant = recompute_Lipschitz_constant
+
+ def fit(self, K, y, Lipschitz_constant=None, verbose=0, **params):
+ """ Fits the estimator
+
+ We want to solve a problem of the form y = KB + b
+ where K is a (n_samples, n_kernels*n_samples) matrix.
+
+ Parameters
+ ---------
+ K : ndarray
+ numpy array of shape (n, p)
+ K is the concatenation of the p/n kernels
+ where each kernel is of size (n, n)
+
+ y : ndarray
+ an array of the labels to predict for each kernel
+ y is of size p
+ where K.shape : (n, p)
+
+ Lipschitz_constant : float, optional
+ allow the user to pre-compute the Lipschitz constant
+ (its computation can be very slow, so that parameter is very
+ useful if you were to use several times the algorithm on the same data)
+
+ verbose : {0, 1}, optional
+ verbosity of the method : 1 will display information while 0 will display nothing
+ default = 0
+
+ Returns
+ -------
+ self
+ """
+ next_step = hinge_step
+ if self.loss=='squared-hinge':
+ K = y[:, np.newaxis] * K
+ # Equivalent to K = np.dot(np.diag(y), X) but faster
+ elif self.loss=='least-square':
+ next_step = least_square_step
+
+ (n_samples, n_features) = K.shape
+ n_kernels = int(n_features/n_samples) # We assume each kernel is a square matrix
+ self.n_samples, self.n_kernels = n_samples, n_kernels
+
+ if Lipschitz_constant==None:
+ Lipschitz_constant = _load_Lipschitz_constant(K)
+
+ tol = 10**(-6)
+ coefs_current = np.zeros(n_features, dtype=np.float) # coefficients to compute
+ coefs_next = np.zeros(n_features, dtype=np.float)
+ Z = np.copy(coefs_next) # a linear combination of the coefficients of the 2 last iterations
+ tau_1 = 1
+
+ if self.penalty=='l11':
+ prox = lambda u:prox_l11(u, self.lambda_*Lipschitz_constant)
+ elif self.penalty=='l22':
+ prox = lambda u:prox_l22(u, self.lambda_*Lipschitz_constant)
+ elif self.penalty=='l21':
+ prox = lambda u:prox_l21(u, self.lambda_*Lipschitz_constant, n_samples, n_kernels)
+ elif self.penalty=='l12':
+ prox = lambda u:prox_l12(u, self.lambda_*Lipschitz_constant, n_samples, n_kernels)
+
+ if verbose==1:
+ self.iteration_dual_gap = list()
+
+ for i in range(self.n_iter):
+ coefs_current = coefs_next # B_(k-1) = B_(k)
+ coefs_next = prox(Z + Lipschitz_constant*next_step(y, K, Z))
+
+ tau_0 = tau_1 #tau_(k+1) = tau_k
+ tau_1 = (1 + sqrt(1 + 4*tau_0**2))/2
+
+ Z = coefs_next + (tau_0 - 1)/tau_1*(coefs_next - coefs_current)
+
+ # Dual problem
+ objective_var = 1 - np.dot(K, coefs_next)
+ objective_var = np.maximum(objective_var, 0) # Shrink
+ # Primal objective function
+ penalisation = self.lambda_/self.q*(mixed_norm(coefs_next,
+ self.p, self.q, n_samples, n_kernels)**self.q)
+ loss = 0.5*np.sum(objective_var**2)
+ objective_function = penalisation + loss
+
+ # Dual objective function
+ dual_var = objective_var
+ if self.lambda_ != 0:
+ dual_penalisation = dual_mixed_norm(np.dot(K.T,dual_var)/self.lambda_,
+ n_samples, n_kernels, self.penalty)
+ if self.q==1:
+ # Fenchel conjugate of a mixed norm
+ if dual_penalisation > 1:
+ dual_var = dual_var / dual_penalisation
+ # If we did not normalise, dual_penalisation
+ # would be +infinity ...
+ dual_penalisation = 0
+ else:
+ # Fenchel conjugate of a squared mixed norm
+ dual_penalisation = self.lambda_/2*(dual_penalisation**2)
+ else:
+ dual_penalisation = 0
+ dual_loss = -0.5*np.sum(dual_var**2) + np.sum(dual_var)
+ # trace(np.dot(duat_var[:, np.newaxis], y)) au lieu du sum(dual_var) ?
+ dual_objective_function = dual_loss - self.lambda_/self.q*dual_penalisation
+ gap = abs(objective_function - dual_objective_function)
+
+ if verbose:
+ sys.stderr.write("Iteration : %d\r" % i )
+ # print "iteration %d" % i
+ self.iteration_dual_gap.append(gap)
+ if i%1000 == 0:
+ print("primal objective : %f, dual objective : %f, dual_gap : %f" % (objective_function, dual_objective_function, gap))
+
+ if gap<=tol and i>10:
+ print("convergence at iteration : %d" %i)
+ break
+
+ if verbose:
+ print("dual gap : %f" % gap)
+ print("objective_function : %f" % objective_function)
+ print("dual_objective_function : %f" % dual_objective_function)
+ print("dual_penalisation : %f" % dual_penalisation)
+ print("dual_loss : %f" % dual_loss)
+ self.coefs_ = coefs_next
+ self.gap = gap
+ self.objective_function = objective_function
+ self.dual_objective_function = dual_objective_function
+
+ return self
+
+ def predict(self, K):
+ """ Returns the prediction associated to the Kernels represented by K
+
+ Parameters
+ ----------
+ K : ndarray
+ ndarray of size (n_samples, n_kernels*n_samples) representing the kernels
+
+ Returns
+ -------
+ ndarray : the prediction associated to K
+ """
+ if self.loss=='squared-hinge':
+ res = np.sign(np.dot(K, self.coefs_))
+ res[res==0] = 1
+ return res
+ else:
+ return np.dot(K, self.coefs_)
+
+ def score(self, K, y):
+ """ Returns the score prediction for the given data
+
+ Parameters
+ ----------
+ K : ndarray
+ matrix of observations
+
+ y : ndarray
+ the labels correspondings to K
+
+ Returns
+ -------
+ The percentage of good classification for K
+ """
+ if self.loss=='squared-hinge':
+ return np.sum(np.equal(self.predict(K), y))*100./len(y)
+ else:
+ print("Score not yet implemented for regression\n")
+ return None
+
+ def info(self, K, y):
+ """ For test purpose
+
+ Parameters
+ ----------
+ K : 2D-array
+ kernels
+
+ y : ndarray
+ labels
+ Returns
+ -------
+ A dict of informations
+ """
+ result = Bunch()
+ n_samples, n_kernels = self.n_samples, self.n_kernels
+ nulled_kernels = 0
+ nulled_coefs_per_kernel = list()
+
+ for i in self.coefs_.reshape(n_kernels, n_samples):
+ if len(i[i!=0]) == 0:
+ nulled_kernels = nulled_kernels + 1
+ nulled_coefs_per_kernel.append(len(i[i==0]))
+
+ result['score'] = self.score(K, y)
+ result['norms'] = by_kernel_norm(self.coefs_, self.p, self.q,
+ n_samples, n_kernels)
+ result['nulled_coefs'] = len(self.coefs_[self.coefs_==0])
+ result['nulled_kernels'] = nulled_kernels
+ result['nulled_coefs_per_kernel'] = nulled_coefs_per_kernel
+ result['objective_function'] = self.objective_function
+ result['dual_objective_function'] = self.dual_objective_function
+ result['gap'] = self.gap
+ result['auc_score'] = roc_auc_score(y, self.predict(K))
+ result['lambda_'] = self.lambda_
+
+ return result
diff --git a/modules/hp.py b/modules/hp.py
new file mode 100644
index 0000000..aadb7c2
--- /dev/null
+++ b/modules/hp.py
@@ -0,0 +1,205 @@
+def hp(t, C, T, ahp, Methods, Index, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz, LCurve = False, Arrhenius = False):
+ """Plots heatmap in ahp = [ahp1, ahp2] axes
+ ahp1 for T vs Emission rates and
+ ahp2 for Arrhenius or DLTS plots
+
+ Parameters:
+ -------------
+ t : array
+ Time domain data from experiment
+ C : 2D array (len(t), len(T))
+ Contains transients for temperatures 1, 2, ...
+ [F1(t), F2(t),...] from experiment
+ T : array
+ Temperature from experiment
+ ahp : list of matplotlib axes [ahp1, ahp2]
+ Axes to plot heatplot and Arrhenius
+ Methods : list
+ Method names used for plotting
+ Index : int
+ Index to plot specific slice of heatplot
+ Reg_L1, Reg_L2 : floats
+ Reg. parameters for FISTA(L1) and L2 regularization
+ Reg_C, Reg_S : floats
+ Reg. parameters for CONTIN and reSpect algorithms
+ Bounds : list
+ [lowerbound, upperbound] of s domain points
+ Nz : int
+ Value which is length of calculated vector f(s)
+ LCurve : boolean
+ Plot using L-curve criteria if True
+ """
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ from matplotlib import cm
+ from matplotlib import gridspec
+
+ from L1_FISTA import l1_fista
+ from L2 import L2
+ from L1L2 import L1L2
+ from contin import Contin
+ from reSpect import reSpect
+ from regopt import regopt
+
+ import sys
+
+ def progressbar(i, iterations):
+ """Prints simple progress bar"""
+ i = i + 1
+ sys.stdout.write("[%-20s] %d%% Building Heatmap" % ('#'*np.ceil(i*100/iterations*0.2).astype('int'),
+ np.ceil(i*100/iterations))+'\r')
+ sys.stdout.flush()
+
+ cut = len(T)
+ cus = Nz
+
+ if len(Methods) > 1:
+ print('Choose only one Method')
+ Methods = Methods[0]
+
+ XZ = []
+ YZ = []
+ ZZ = []
+
+ for M in Methods:
+ if M == 'FISTA':
+ for i in range(0, cut):
+ YZ.append(np.ones(cus)*T[i])
+ TEMPE, TEMPX, a = l1_fista(t, C[i], Bounds, Nz, Reg_L1)
+ XZ.append(TEMPE)
+ ZZ.append(TEMPX)
+
+ progressbar(i, cut)
+
+ elif M == 'L2':
+ for i in range(0, cut):
+ YZ.append(np.ones(cus)*T[i])
+ TEMPE, TEMPX, a = L2(t, C[i], Bounds, Nz, Reg_L2)
+ XZ.append(TEMPE)
+ ZZ.append(TEMPX)
+
+ progressbar(i, cut)
+
+ elif M == 'L1+L2':
+ for i in range(0, cut):
+ YZ.append(np.ones(cus)*T[i])
+ TEMPE, TEMPX, a = L1L2(t, C[i], Bounds, Nz, Reg_L1, Reg_L2)
+ XZ.append(TEMPE)
+ ZZ.append(TEMPX)
+
+ progressbar(i, cut)
+
+ elif M == 'Contin':
+ for i in range(0, cut):
+ YZ.append(np.ones(cus)*T[i])
+ if LCurve:
+ ay = 0
+ Reg_C = regopt(t, C[i], ay, Methods, Reg_L1, Reg_L2,
+ Reg_C, Reg_S, Bounds, Nz, LCurve)
+ TEMPE, TEMPX, a = Contin(t, C[i], Bounds, Nz, Reg_C)
+ #print(YZ[-1][0], 'K; a = ', Reg_C)
+ XZ.append(TEMPE)
+ ZZ.append(TEMPX*TEMPE)
+
+ progressbar(i, cut)
+
+ elif M == 'reSpect':
+ for i in range(0, cut):
+ YZ.append(np.ones(cus)*T[i])
+ if LCurve:
+ ay = 0
+ Reg_S = regopt(t, C[i], ay, Methods, Reg_L1, Reg_L2,
+ Reg_C, Reg_S, Bounds, Nz, LCurve)
+ TEMPE, TEMPX, a = reSpect(t, C[i], Bounds, Nz, Reg_S)
+ #print(YZ[-1][0], 'K; a = ', Reg_C)
+ XZ.append(TEMPE)
+ ZZ.append(TEMPX)
+
+ progressbar(i, cut)
+
+
+
+ XZ = np.asarray(XZ)
+ YZ = np.asarray(YZ)
+ ZZ = np.asarray(ZZ)
+
+ ahp1, ahp2 = ahp[0], ahp[1]
+
+ if Methods[0] == 'reSpect':
+ v = np.abs(np.average(ZZ[10:-10,20:-20]))*20
+ vmin, vmax = 0, v/2
+ cmap = cm.jet
+ levels = np.linspace(vmin, vmax, 20)
+ #print(v)
+
+ elif Methods[0] == 'Contin':
+ v = np.abs(np.average(ZZ[10:-10,20:-20]))*10
+ vmin, vmax = 0, v
+ cmap = cm.jet
+ levels = 20
+
+ elif Methods[0] == 'FISTA' or Methods[0] == 'L2' or Methods[0] == 'L1+L2':
+ v = np.abs(np.average(ZZ))*50
+ vmin, vmax = -v, v
+ cmap = cm.bwr
+ levels = np.linspace(vmin, vmax, 20)
+
+ #extent = [np.log10(Bounds[0]), np.log10(Bounds[1]), (T[-1]), (T[0])]
+
+ x, y = np.meshgrid(TEMPE, T)
+
+ ahp1.set_xlabel(r'Emission rate $e_{n,p}$, s')
+ ahp1.set_title(Methods[0])
+ ahp1.set_ylabel('Temperature T, K')
+ ahp1.grid(True)
+ #normalize = plt.Normalize(vmin = -v, vmax = v)
+
+ heatmap = ahp1.contourf(x, y, ZZ, levels = levels, cmap=cmap, corner_mask = False,
+ vmin = vmin, vmax = vmax, extend = 'both')
+ plt.colorbar(heatmap)
+ ahp1.set_xscale('log')
+
+ if Arrhenius:
+
+ ahp2.ticklabel_format(style='sci', axis='x', scilimits=(0,0), useOffset = False)
+
+ arrh = ahp2.contourf(1/y, np.log(x*y**-2), ZZ, levels = levels, cmap=cmap,
+ vmin = vmin, vmax = vmax, extend = 'both')
+ #ahp2.set_xscale('log')
+ ahp2.set_xlabel('Temperature $1/T$, $K^-1$')
+ ahp2.set_ylabel('$\ln(e\cdot T^-2)$')
+
+ else:
+ ahp2.set_xlabel('Temperature, K')
+ ahp2.set_ylabel('LDLTS signal, arb. units')
+ for i in range(int(len(TEMPE)*0.1), int(len(TEMPE)*0.8), 20):
+ # ad.plot(T, ZZ[:, i], label=r'$\tau = %.3f s$'%(1/TEMPE[i]))
+ ahp2.plot(T, ZZ[:, i]/np.amax(ZZ[:,i]), label=r'$\tau = %.3f s$'%(1/TEMPE[i]))
+ #ahp2.set_yscale('log')
+ #ahp2.set_ylim(1E-4, 10)
+ ahp2.grid()
+ ahp2.legend()
+
+ plt.show()
+ plt.tight_layout()
+
+ ##save file
+ #Table = []
+ #Table.append([0] + (1/TEMPE).tolist())
+ #for i in range(cut):
+ # Table.append([T[i]] + (ZZ[i,:]).tolist())
+
+ Table = []
+ e = 1/TEMPE
+ Table.append([0] + e.tolist())
+ for i in range(cut):
+ Table.append([T[i]] + (ZZ[i,:]).tolist())
+
+
+ #print(Table)
+ #Table = np.asarray(Table)
+
+ np.savetxt('processed/NEW-FILE'%((t[1]-t[0])*1000) +'_1'+'.LDLTS',
+ Table, delimiter='\t', fmt = '%4E')
diff --git a/modules/interface.py b/modules/interface.py
new file mode 100644
index 0000000..e6d3a49
--- /dev/null
+++ b/modules/interface.py
@@ -0,0 +1,142 @@
def interface(path):
    """Build and display the ipywidgets control panel and bind it to demo().

    Parameters:
    -------------
    path : str
        Path to the experimental data file; forwarded to read_file()
        and stashed on the function object as ``interface.path``.
    """

    import numpy as np
    import ipywidgets as widgets

    from ipywidgets import interact, interactive, fixed, interact_manual, HBox, VBox, Label

    from read_file import read_file
    from demo import demo

    import warnings
    warnings.filterwarnings('ignore')

    # Stash the chosen path on the function object (poor-man's global,
    # read elsewhere as interface.path).
    interface.path = path


    t, C, T = read_file(path)

    # T is a 0-d array for single-transient files; the slider then spans 0..1
    if len(T.shape) != 0:
        cut = len(T) - 1
    else:
        cut = 1
    # index of the transient to analyse
    Index = widgets.IntSlider(
        value=0,
        min=0,
        max=cut,
        step=1,
        description='')

    # inversion methods to run (multiple selection allowed)
    Methods = widgets.SelectMultiple(
        options = ['FISTA', 'L2', 'L1+L2', 'Contin', 'reSpect'],
        value = ['Contin'],
        #rows = 10,
        description = 'Methods:',
        disabled = False)

    # number of points in the emission-rate domain
    Nz = widgets.IntText(
        value=100,
        description=r'Nf =',
        disabled=False)

    # regularization strengths — one log-slider per method
    Reg_L1 = widgets.FloatLogSlider(
        value=1e-8,
        base=10,
        min=-10, # min exponent of base
        max=1, # max exponent of base
        step=0.2, # exponent step
        description=r'FISTA: λ1')

    Reg_L2 = widgets.FloatLogSlider(
        value=1e-8,
        base=10,
        min=-10, # min exponent of base
        max=1, # max exponent of base
        step=0.2, # exponent step
        description=r'L2: λ2')

    Reg_C = widgets.FloatLogSlider(
        value=1E-1,
        base=10,
        min=-8, # min exponent of base
        max=2, # max exponent of base
        step=0.2, # exponent step
        description=r'Contin: λC')

    Reg_S = widgets.FloatLogSlider(
        value=1E-2,
        base=10,
        min=-12, # min exponent of base
        max=2, # max exponent of base
        step=0.2, # exponent step
        description=r'reSpect: λS')

    # emission-rate window as powers of ten: 10**a .. 10**b
    Bounds = widgets.IntRangeSlider(
        value=[-2, 2],
        min=-5,
        max=5,
        step=1,
        description=r'10a to 10b:',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=True,
        readout_format='d')

    # sampling period of the transient, in milliseconds
    dt = widgets.BoundedFloatText(
        value=150,
        min=0,
        max=1000,
        step=1,
        description='Time step, ms',
        disabled=False)

    Plot = widgets.ToggleButton(
        value=True,
        description='Hide graphics?',
        disabled=False,
        button_style='success', # 'success', 'info', 'warning', 'danger' or ''
        tooltip='Hides graphics',
        icon='eye-slash')

    Residuals = widgets.ToggleButton(
        value=False,
        description='Compute L-curve?',
        disabled=False,
        button_style='info', # 'success', 'info', 'warning', 'danger' or ''
        tooltip='Plots L-curve to choose best value of regularization parameter of L2 reg. method',
        icon='calculator')

    LCurve = widgets.Checkbox(
        value = False,
        description = 'Use L-Curve optimal?',
        disabled = False)

    Arrhenius = widgets.Checkbox(
        value = False,
        description = 'Draw Arrhenius instead DLTS?',
        disabled = False)

    Heatplot = widgets.ToggleButton(
        value=False,
        description='Plot heatmap?',
        disabled=False,
        button_style='warning', # 'success', 'info', 'warning', 'danger' or ''
        tooltip='Plots heatmap of data from chosen file',
        icon='braille')


    # Assemble the panel; interactive_output re-runs demo() on any change.
    left_box = VBox([Methods, Nz, dt])
    centre_box = VBox([Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds])
    right_box = VBox([LCurve, Arrhenius, Plot, Residuals, Heatplot])
    ui = widgets.HBox([left_box, centre_box, right_box])
    Slider = widgets.HBox([Label('Transient №'),Index])
    out = widgets.interactive_output(demo, {'Index':Index, 'Nz':Nz,
            'Reg_L1':Reg_L1, 'Reg_L2':Reg_L2, 'Reg_C':Reg_C, 'Reg_S':Reg_S,
            'Bounds':Bounds, 'dt':dt, 'Methods':Methods,
            'Plot':Plot, 'LCurve':LCurve, 'Arrhenius':Arrhenius,
            'Residuals':Residuals, 'Heatplot': Heatplot})
    # NOTE(review): display() is an IPython/Jupyter builtin — this function
    # only works inside a notebook; it is undefined in plain Python.
    display(ui, Slider, out)
diff --git a/modules/laplace.py b/modules/laplace.py
new file mode 100644
index 0000000..f7ed015
--- /dev/null
+++ b/modules/laplace.py
@@ -0,0 +1,59 @@
def laplace(t, F, Nz, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Methods):
    """Run every selected inverse-Laplace method on one transient.

    Parameters:
    -------------
    t : array
        Time domain data from experiment
    F : array,
        Transient data from experiment F(t)
    Nz : int
        Number of points z to compute, must be smaller than len(F)
    Reg_L1, Reg_L2 : floats
        Reg. parameters for FISTA(L1) and L2 regularization
    Reg_C, Reg_S : floats
        Reg. parameters for CONTIN and reSpect algorithms
    Bounds : list
        [lowerbound, upperbound] of s domain points
    Methods : list
        Names of processing methods (unknown names are silently skipped)

    Returns:
    -------------
    data : object ndarray of rows [s, f, F_restored, method_name]
        One row per successfully dispatched method.
    """
    import numpy as np
    from L1_FISTA import l1_fista
    from L2 import L2
    from L1L2 import L1L2
    from contin import Contin
    from reSpect import reSpect

    # One solver thunk per method name; each returns (s, f, F_restored).
    solvers = {
        'FISTA':   lambda: l1_fista(t, F, Bounds, Nz, Reg_L1),
        'L2':      lambda: L2(t, F, Bounds, Nz, Reg_L2),
        'L1+L2':   lambda: L1L2(t, F, Bounds, Nz, Reg_L1, Reg_L2),
        'Contin':  lambda: Contin(t, F, Bounds, Nz, Reg_C),
        'reSpect': lambda: reSpect(t, F, Bounds, Nz, Reg_S),
    }

    data = []
    for name in Methods:
        if name in solvers:
            s, f, F_hat = solvers[name]()
            data.append([s, f, F_hat, name])

    # dtype=object is required: each row mixes arrays of differing length
    # with a string, and NumPy >= 1.24 raises ValueError on such ragged
    # input without it (it was only a deprecation warning before).
    return np.asarray(data, dtype=object)
diff --git a/modules/plot_data.py b/modules/plot_data.py
new file mode 100644
index 0000000..a3466df
--- /dev/null
+++ b/modules/plot_data.py
@@ -0,0 +1,89 @@
def plot_data(t, F, data, T, Index):
    """Render the summary figure for one processed transient.

    Parameters:
    -------------
    t : array
        Time domain data from experiment
    F : array,
        Transient data from experiment F(t)
    data : list of [[s, f, F_restored, Method1], ...]
        Data list for processed data
    T : float
        Temperature value of certain transient
    Index : int
        Index of transient in initial dataset (not data)

    Returns:
    -------------
    ay : matplotlib axes
        Axes for L-Curve plotting
    [ahp1, ahp2] : list of matplotlib axes
        Axes for the Arrhenius/DLTS plots drawn later by hp()
    """

    import numpy as np
    import matplotlib.pyplot as plt

    # Three-row grid: spectra on top, L-curve + transients in the middle,
    # two axes for hp() at the bottom.
    fig = plt.figure(constrained_layout=True, figsize = (9.5,11))
    spec = fig.add_gridspec(ncols=2, nrows=3,
                            width_ratios=[0.5, 0.5],
                            height_ratios=[0.3, 0.3, 0.4])

    # --- top row: recovered spectra f(s), one curve per method ---
    ax = fig.add_subplot(spec[0,:])
    ax.set_title(r'Temperature %.2f K'%T[Index])
    ax.set_ylabel(r'Amplitude, arb. units')
    ax.set_xlabel(r'Emission rate, $s^{-1}$')
    ax.set_xscale('log')
    ax.grid(True, which = "both", ls = "-")

    spectrum_style = {'FISTA': 'r-', 'L2': 'b-', 'L1+L2': 'm-', 'reSpect': 'y-'}
    for row in data:
        s_dom, f_dom, method = row[0], row[1], row[3]
        if method == 'Contin':
            # NOTE(review): the Contin result is scaled by s before plotting —
            # presumably it returns a density over s; confirm against Contin().
            ax.plot(s_dom, f_dom * s_dom, 'c-', label = method)
        elif method in spectrum_style:
            ax.plot(s_dom, f_dom, spectrum_style[method], label = method)
    ax.legend()

    # Axes reserved for the L-curve (filled in later by regopt())
    ay = fig.add_subplot(spec[1, 0])

    # --- middle-right: measured transient vs restored ones ---
    az = fig.add_subplot(spec[1, 1])
    az.set_ylabel(r'Transient , arb. units')
    az.set_xlabel(r'Time $t$, $s$')
    az.grid(True, which = "both", ls = "-")
    az.plot(t, F, 'ks-', label = 'Original')
    az.set_xscale('log')

    transient_style = {'FISTA': 'ro-', 'L1+L2': 'm*-', 'Contin': 'cx-'}
    for row in data:
        restored, method = row[2], row[3]
        if method == 'L2':
            # last point sucks — drop it from both axes
            az.plot(t[:-1], restored[:-1], 'b>-', label = method)
        elif method == 'reSpect':
            # shift the restored curve so it ends at the measured final value
            az.plot(t, restored - restored[-1] + F[-1], 'y+-', label = method)
        elif method in transient_style:
            az.plot(t, restored, transient_style[method], label = method)
    az.legend()


    plt.tight_layout()

    ahp1, ahp2 = fig.add_subplot(spec[2, 0]), fig.add_subplot(spec[2, 1])

    return ay, [ahp1, ahp2]
diff --git a/modules/reSpect.py b/modules/reSpect.py
new file mode 100644
index 0000000..9601916
--- /dev/null
+++ b/modules/reSpect.py
@@ -0,0 +1,315 @@
+import numpy as np
+
+from scipy.interpolate import interp1d
+from scipy.integrate import cumtrapz, quad
+from scipy.optimize import nnls, minimize, least_squares
+
def Initialize_f(F, s, kernMat, *argv):
    """Produce a first regularized estimate of the log-spectrum f.

    Runs get_f() once at a fixed lambda = 1 from a crude starting guess:

        argmin_f = ||F - kernel(f)||2 + lambda * ||L f||2

    Parameters:
    ------------
    F : array
        Transient experimental data
    s : array
        Tau-domain points
    kernMat : matrix, shape (len(t), len(s))
        Discretized Laplace kernel from getKernMat()
    argv[0] : float, optional
        Initial baseline value F_b

    Returns:
    -----------
    flam : array
        Regularized estimate of f
    F_b : float
        Updated baseline (returned only when a baseline was passed in)
    """

    lam = 1e0                               # fixed lambda for the warm-up fit
    guess = np.sin(np.pi * s) - 5.0         # low, slightly oscillating start

    if argv:
        # fit the baseline jointly; get_f returns the (f, F_b) pair
        return get_f(lam, F, guess, kernMat, argv[0])
    return get_f(lam, F, guess, kernMat)
+
def get_f(lam, F, f, kernMat, *argv):
    """Solve the regularized problem with scipy.optimize.least_squares:

        argmin_f = ||F - kernel(f)||2 + lambda * ||L f||2

    using residualLM() as the residual and jacobianLM() as its Jacobian.

    Parameters:
    -------------
    lam : float
        Regularization parameter
    F : array
        Transient experimental data
    f : array
        Initial guess for the log-spectrum f(s)
    kernMat : matrix, shape (len(t), len(s))
        Discretized Laplace kernel
    argv[0] : float, optional
        Baseline value F_b, fitted jointly when given

    Returns:
    -------------
    flam : array
        Regularized solution
    F_b : float
        Fitted baseline (returned only when a baseline was passed in)
    """

    if argv:
        # append the baseline to the unknown vector, split it off afterwards
        unknowns = np.append(f, argv[0])
        fit = least_squares(residualLM, unknowns, jac=jacobianLM,
                            args=(lam, F, kernMat))
        return fit.x[:-1], fit.x[-1]

    fit = least_squares(residualLM, f, jac=jacobianLM,
                        args=(lam, F, kernMat))
    return fit.x
+
def residualLM(f, lam, F, kernMat):
    """Stacked residual vector for least_squares().

    The first n entries are relative data residuals 1 - K(f)/F; the
    remaining (ns - 2) entries are sqrt(lam)-weighted second differences
    of f (the smoothness penalty). When len(f) == ns + 1 the trailing
    entry of f carries the baseline F_b.

    Parameters:
    -------------
    f : array
        Current log-spectrum (optionally with F_b appended)
    lam : float
        Regularization parameter
    F : array
        Experimental transient data
    kernMat : matrix, shape (len(F), ns)
        Discretized Laplace kernel

    Returns:
    -----------
    r : array, length len(F) + ns - 2
        Residual vector
    """

    n, ns = kernMat.shape
    nl = ns - 2                     # number of interior second-difference rows

    if len(f) > ns:
        # plateau mode: unpack the baseline from the last slot
        f, F_b = f[:-1], f[-1]
        model = kernel_prestore(f, kernMat, F_b)
    else:
        model = kernel_prestore(f, kernMat)

    r = np.zeros(n + nl)
    r[:n] = 1. - model / F                     # relative data misfit
    r[n:] = np.sqrt(lam) * np.diff(f, n=2)     # smoothness penalty

    return r
+
def jacobianLM(f, lam, F, kernMat):
    """Analytic Jacobian Jr[i, j] = dr_i/df_j matching residualLM().

    Parameters:
    -------------
    f : array
        Current log-spectrum (optionally with F_b appended)
    lam : float
        Regularization parameter
    F : array
        Experimental transient data
    kernMat : matrix, shape (len(F), ns)
        Discretized Laplace kernel

    Returns:
    ------------
    Jr : matrix, (n + ns - 2) rows; ns (+1 with baseline) columns
    """

    n, ns = kernMat.shape
    nl = ns - 2

    # Interior rows of the (1, -2, 1) second-difference operator on f.
    D2 = np.diag(np.ones(ns - 1), 1) + np.diag(np.ones(ns - 1), -1) - 2. * np.eye(ns)
    D2 = D2[1:nl + 1, :]

    # d(1 - K/F)/df carries a 1/F factor on every data row.
    inv_F = np.outer(1. / F, np.ones(ns))

    if len(f) > ns:
        # One extra column for the baseline unknown F_b.
        Jr = np.zeros((n + nl, ns + 1))
        Jr[:n, :ns] = -kernelD(f[:-1], kernMat) * inv_F
        Jr[:n, ns] = -1. / F                    # dr_i/dF_b on the data rows
        Jr[n:, :ns] = np.sqrt(lam) * D2         # F_b column of these rows stays 0
    else:
        Jr = np.zeros((n + nl, ns))
        Jr[:n, :] = -kernelD(f, kernMat) * inv_F
        Jr[n:, :] = np.sqrt(lam) * D2

    return Jr
+
def kernelD(f, kernMat):
    """Derivative helper for jacobianLM(): dK_i/df_j for K(f) = kernMat @ exp(f).

    Each column j of the kernel is scaled by exp(f_j); broadcasting over
    rows replaces the original explicit ones(n,1) @ exp(f) outer product.

    Parameters:
    -------------
    f : array, length ns
        Current log-spectrum
    kernMat : matrix, shape (n, ns)
        Discretized Laplace kernel

    Returns:
    -------------
    DK : matrix, shape (n, ns)
    """

    return kernMat * np.exp(f)
+
def getKernMat(s, t):
    """Build the discretized Laplace kernel with trapezoidal weights.

    The weights are trapezoidal quadrature coefficients on the
    logarithmic s grid, so that (kernel @ exp(f)) approximates the
    integral of exp(-t/s) * exp(f(s)) over ln s.

    Parameters:
    -------------
    s : array
        Tau-domain points (log-spaced)
    t : array
        Time-domain points from experiment

    Returns:
    -------------
    matrix, shape (len(t), len(s))
        exp(-T/S) scaled column-wise by the quadrature weights
    """
    npts = len(s)
    w = np.zeros(npts)                              # trapezoidal weights in ln s
    w[0] = 0.5 * np.log(s[1] / s[0])
    w[-1] = 0.5 * np.log(s[-1] / s[-2])
    w[1:-1] = 0.5 * (np.log(s[2:]) - np.log(s[:-2]))

    S, T = np.meshgrid(s, t)
    return np.exp(-T / S) * w
+
def kernel_prestore(f, kernMat, *argv):
    """Forward model: kernMat @ exp(f), plus an optional baseline.

    Parameters:
    -------------
    f : array
        Log-spectrum
    kernMat : matrix, shape (n, len(f))
        Discretized Laplace kernel
    argv[0] : float, optional
        Baseline F_b added to every sample (defaults to 0)

    Returns:
    ------------
    array, length n
        Modelled transient kernMat @ exp(f) + F_b
    """

    baseline = argv[0] if argv else 0.
    return np.dot(kernMat, np.exp(f)) + baseline
+
+
def reSpect(t, F, bound, Nz, alpha):
    """
    Main routine of the reSpect algorithm [1]: nonlinear least-squares
    inversion of the transient with a smoothness penalty on the
    log-spectrum f.

    [1] Shanbhag, S. (2019)

    Parameters:
    --------------
    t : array
        Time domain points from experiment
    F : array
        Experimental transient F(t)
    bound : list
        [lowerbound, upperbound] in emission-rate units; inverted
        internally to obtain the tau window
    Nz : int
        Length of tau-domain array
    alpha : float
        Regularization parameter

    Returns:
    --------------
    1/s[::-1] : array
        Rate-domain points (ascending; s itself is the tau grid)
    np.exp(f)[::-1] : array
        Recovered spectrum, reordered to match
    kernMat @ np.exp(f) : array
        Restored transient WITHOUT the fitted baseline F_b — callers
        re-anchor it to the data themselves (see plot_data)
    """

    ns = Nz                                 # discretization of 'tau'

    # log-spaced tau grid spanning the inverted bounds
    smin, smax = 1/bound[1], 1/bound[0]     # s is tau domain points!
    hs = (smax/smin)**(1./(ns-1))
    s = smin * hs**np.arange(ns)

    kernMat = getKernMat(s, t)

    # warm-up fit at a fixed lambda; np.min(F) seeds the baseline F_b
    fgs, F_b = Initialize_f(F, s, kernMat, np.min(F))

    # final fit at the requested regularization strength
    f, F_b = get_f(alpha, F, fgs, kernMat, F_b)

    # (cleanup: removed unused tmin/tmax locals, a no-op `alpha = alpha`,
    # an unused kernel_prestore() evaluation, and a pointless [::] copy)
    return 1/s[::-1], np.exp(f)[::-1], kernMat @ np.exp(f)
diff --git a/modules/read_file.py b/modules/read_file.py
new file mode 100644
index 0000000..14fdf4b
--- /dev/null
+++ b/modules/read_file.py
@@ -0,0 +1,81 @@
def read_file(Path, dt=150, proc = True):
    """Read a tab-delimited DLTS data file.

    Parameters:
    --------------
    Path : str
        Path to file
    dt : float
        Step between time points in ms
    proc : boolean
        Normalize/rectify the transients if True

    Returns:
    --------------
    time : array
        Time domain points in s: dt, 2*dt, ..., matching the transient length
    C : array of [F1(time), F2(time), ...]
        Transients; 2-D for multi-temperature files, 1-D for a single one
    T : array
        Experimental temperature points (0-d array for a single transient)
    """
    import numpy as np

    def process_one(F):
        """Normalize one transient: scale by the final sample, shift the
        extremum to zero, rectify, and lift off zero."""
        F = F / F[-1]           # original used np.average(F[-1]) == F[-1]
        if F[0] > F[-1]:
            F = F - min(F)      # decaying transient
        else:
            F = F - max(F)      # rising transient
        F = np.abs(F)
        # Lift off zero so downstream log-domain handling behaves.
        # (The old `- get_Baseline(F)*0` term was a disabled no-op that
        # could still poison F with NaN when the baseline diverged
        # (inf * 0 == nan); it has been removed.)
        return F + np.average(F) * 2

    def process(C, proc):
        """Process every transient (2-D input) or the single one (1-D)."""
        if not proc:
            # raw mode: normalize to the last sample and lift off zero
            C = C / C[-1]
            return C + np.average(C) * 2
        if C.ndim == 1:
            # Bug fix: with proc=True a 1-D file used to crash on scalar
            # indexing inside the per-transient loop.
            return process_one(C.astype(float))
        out = np.empty_like(C, dtype=float)
        for i, F in enumerate(C):
            out[i] = process_one(F)
        return out

    Path = str(Path)

    txt = np.genfromtxt(Path, delimiter='\t')
    if txt.ndim == 2:
        # one transient per row: col 0 = temperature, cols 3..-3 = samples
        T = txt[:, 0]
        C = txt[:, 3:-2]
        n_pts = C.shape[1]
    else:
        # single transient: first value = temperature, the rest are samples
        T = txt[0]
        C = txt[1:]
        n_pts = len(C)

    # dt ms -> s. Bug fix: the old single-transient branch used
    # np.arange(dt, dt*len(C), dt)*1e-3 which yields len(C)-1 points,
    # one short of the transient; both branches now match.
    time = np.asarray([dt / 1000 * (i + 1) for i in range(n_pts)])

    C = process(np.asarray(C), proc)
    T = np.asarray(T)

    return time, C, T
diff --git a/modules/regopt.py b/modules/regopt.py
new file mode 100644
index 0000000..eab4412
--- /dev/null
+++ b/modules/regopt.py
@@ -0,0 +1,189 @@
def regopt(t, F, ay, Methods, Reg_L1, Reg_L2, Reg_C, Reg_S, Bounds, Nz, LCurve = False):
    """
    Computes the L-curve (residual norm vs solution norm) over a sweep of
    regularization parameters and locates its corner (maximum curvature).

    Parameters:
    -------------
    t : array
        Time domain data from experiment
    F : array,
        Transient data from experiment F(t)
    ay : matplotlib axes
        Axes for L-curve plotting
    Methods : list
        Names of processing methods; exactly one of 'L2', 'Contin' or
        'reSpect' is supported here
    Reg_L1, Reg_L2 : floats
        Reg. parameters for FISTA(L1) and L2 regularization
    Reg_C, Reg_S : floats
        Reg. parameters for CONTIN and reSpect algorithms
    Bounds : list
        [lowerbound, upperbound] of emission rates domain points
    Nz : int
        Number of points in emission rates domain to compute,
        must be smaller than len(F)
    LCurve : boolean
        If True, skip plotting and return the optimal parameter

    Returns:
    -------------
    alpha[i] : float
        Optimal reg. parameter (only when LCurve is True); otherwise the
        L-curve is drawn on `ay` and None is returned.
    """
    from laplace import laplace
    import matplotlib.pyplot as plt
    import numpy as np
    from scipy.signal import savgol_filter

    import sys

    def progressbar(i, iterations):
        """Prints a simple in-place progress bar to stdout."""
        i = i + 1
        sys.stdout.write("[%-20s] %d%% Building L-Curve" % ('#'*np.ceil(i*100/iterations*0.2).astype('int'),
                                                        np.ceil(i*100/iterations))+'\r')
        sys.stdout.flush()

    def curvature(x, y, a):
        """Returns curvature of the (smoothed) parametric line x(a), y(a):

        k = (x'*y''-x''*y')/((x')^2+(y')^2)^(3/2)

        Parameters:
        -------------
        x, y : arrays of x and y respectively
        a : array of reg parameters

        Returns:
        -------------
        k : array of curvature values (Savitzky-Golay smoothed)
        """
        x = savgol_filter(x, 13, 1)
        y = savgol_filter(y, 13, 1)
        da = np.gradient(a)
        f_x = np.gradient(x)/da
        f_y = np.gradient(y)/da
        f_xx = np.gradient(f_x)/da
        f_yy = np.gradient(f_y)/da

        k = (f_x*f_yy - f_xx*f_y)/(f_x**2 + f_y**2)**(3/2)
        return savgol_filter(k, 5, 1)

    res = []    # residual norms ||Cf - F||2
    sol = []    # solution norms ||f||2

    # sweep three decades either side of the current setting, 40 points
    alpha_L2 = 10**np.linspace(np.log10(Reg_L2) - 3, np.log10(Reg_L2) + 3, 40)
    alpha_C = 10**np.linspace(np.log10(Reg_C) - 3, np.log10(Reg_C) + 3, 40)
    alpha_S = 10**np.linspace(np.log10(Reg_S) - 3, np.log10(Reg_S) + 3, 40)

    if LCurve:
        alpha = alpha_C     # fallback default if no supported method matches

    data = []

    Fx = F

    for method in Methods:

        if len(Methods) > 1:
            print('!!!Choose only one Method!!!')
            break

        if method == 'FISTA':
            break   # L-curve sweep not supported for FISTA

        elif method == 'L2':
            for j, v in enumerate(alpha_L2):
                data = laplace(t, F, Nz, Reg_L1, v, Reg_C, Reg_S, Bounds, Methods)
                e, f, F_restored = data[0][0], data[0][1], data[0][2]

                res.append(np.linalg.norm(np.abs(Fx) - np.abs(F_restored), ord = 2)**2)
                sol.append(np.linalg.norm(f, ord = 2)**2)
                progressbar(j, len(alpha_L2))
            alpha = alpha_L2
            break

        elif method == 'L1+L2':
            break   # L-curve sweep not supported for L1+L2

        elif method == 'Contin':
            for j, v in enumerate(alpha_C):
                data = laplace(t, F, Nz, Reg_L1, Reg_L2, v, Reg_S, Bounds, Methods)
                e, f, F_restored = data[0][0], data[0][1], data[0][2]

                res.append(np.linalg.norm(np.abs(Fx) - np.abs(F_restored), ord = 2)**2)
                # Contin's spectrum is weighted by e for the solution norm
                sol.append(np.linalg.norm(f*e, ord = 2)**2)
                progressbar(j, len(alpha_C))
            alpha = alpha_C
            break

        elif method == 'reSpect':
            for j, v in enumerate(alpha_S):
                data = laplace(t, F, Nz, Reg_L1, Reg_L2, Reg_C, v, Bounds, Methods)
                e, f, F_restored = data[0][0], data[0][1], data[0][2]

                # reSpect restores the transient without its baseline,
                # so compare baseline-free curves
                res.append(np.linalg.norm(np.abs(Fx - Fx[-1]) - np.abs(F_restored - F_restored[-1]), ord = 2)**2)
                sol.append(np.linalg.norm(f, ord = 2)**2)
                progressbar(j, len(alpha_S))
            alpha = alpha_S
            break

    # Plotting L-curve and its normalized curvature

    if len(data) == 0:
        ay.annotate(text = 'Choose only one method \n Contin, reSpect or L2',
                    xy = (0.5,0.5), ha="center", size = 16)
        plt.tight_layout()
    elif LCurve:
        k = curvature(np.log10(res), np.log10(sol), alpha)
        # Endpoints excluded: filter edge effects can fake a corner there.
        # (The original had two byte-identical if/else branches here; collapsed.)
        i = np.squeeze(np.where(k == np.amax(k[1:-1])))
        return alpha[i]
    else:
        k = curvature(np.log10(res), np.log10(sol), alpha)
        i = np.where(k == np.amax(k))
        if Methods[0] != 'reSpect':
            i = np.where(k == np.amax(k[1:-1]))
        i = np.squeeze(i)

        ay.plot(np.log10(res), np.log10(sol), 'k-', )

        ay.set_ylabel(r'Solution norm $\lg||x||^2_2$', c='k')
        ay.set_xlabel(r'Residual norm $\lg||\eta-Cx||^2_2$', c='k')

        # normalized curvature on twin axes (red), reg. parameter on top axis
        ay_k = ay.twinx()
        ay_k_t = ay_k.twiny()
        ay_k_t.set_xscale('log')
        ay_k_t.plot(alpha, k/k[i], 'r-')
        ay_k.set_ylabel(r'Curvature, arb. units', c='r')
        ay_k.set_ylim(-0.1, 1.1)
        ay_k_t.set_xlabel(r'Reg. parameter $\lambda_{%.s}$'%(Methods[0]), c='r')

        ay_k_t.spines['top'].set_color('red')
        ay_k_t.spines['right'].set_color('red')
        ay_k_t.xaxis.label.set_color('red')
        ay_k_t.tick_params(axis='x', colors='red', which='both')
        ay_k.yaxis.label.set_color('red')
        ay_k.tick_params(axis='y', colors='red', which='both')

        # Draw maximal curvature point of L-curve (optimal lambda)
        ay_k_t.plot(alpha[i], k[i]/k[i], 'r*')
        ay.plot(np.log10(res[i]), np.log10(sol[i]), 'r*')   # highlight optimal lambda
        ay.annotate(r"$\lambda_\mathrm{opt} =$ %.1e"%alpha[i], c = 'k',
                    xy = (np.log10(res[i])*0.98, np.log10(sol[i])*0.98), ha = 'left')

        plt.tight_layout()
From 2029b8eb59f9daceeda14a35e02cef9196c529af Mon Sep 17 00:00:00 2001
From: Anton Vasilev <31480657+nocliper@users.noreply.github.com>
Date: Mon, 18 Dec 2023 15:50:27 +0300
Subject: [PATCH 3/3] Update interface.py
---
modules/interface.py | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/modules/interface.py b/modules/interface.py
index 66e0c25..74fd83b 100644
--- a/modules/interface.py
+++ b/modules/interface.py
@@ -39,7 +39,7 @@ def interface(path):
Nz = widgets.IntText(
value=100,
- description=r'N$_\text{f}$ =',
+ description='Nf',
disabled=False)
Reg_L1 = widgets.FloatLogSlider(
@@ -48,7 +48,7 @@ def interface(path):
min=-10, # max exponent of base
max=1, # min exponent of base
step=0.1, # exponent step
- description=r'$λ_\text{FISTA}$')
+ description=r'λ FISTA')
Reg_L2 = widgets.FloatLogSlider(
value=1e-8,
@@ -56,7 +56,7 @@ def interface(path):
min=-15, # max exponent of base
max=1, # min exponent of base
step=0.1, # exponent step
- description=r'$λ_\text{L2}$')
+ description=r'λ L2')
Reg_C = widgets.FloatLogSlider(
value=1E-1,
@@ -64,7 +64,7 @@ def interface(path):
min=-8, # max exponent of base
max=2, # min exponent of base
step=0.1, # exponent step
- description=r'$λ_\text{Contin}$')
+ description=r'λ Contin')
Reg_S = widgets.FloatLogSlider(
value=1E-2,
@@ -72,14 +72,14 @@ def interface(path):
min=-12, # max exponent of base
max=2, # min exponent of base
step=0.1, # exponent step
- description=r'$λ_\text{reSpect}$')
+ description=r'λ reSpect')
Bounds = widgets.IntRangeSlider(
value=[-2, 2],
min=-5,
max=5,
step=1,
- description=r'$10^\text{a} ÷ 10^\text{b}$:',
+ description=r'10ᵃ ÷ 10ᵇ:',
disabled=False,
continuous_update=False,
orientation='horizontal',
@@ -91,7 +91,7 @@ def interface(path):
min=0,
max=1000,
step=1,
- description=r't$_\text{step}$, ms',
+ description=r't step, ms',
disabled=False)
Plot = widgets.ToggleButton(