Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
72 changes: 4 additions & 68 deletions .github/workflows/continuous-integration-pip.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10"]
python-version: ["3.11"]

steps:
- name: Checkout
Expand All @@ -34,75 +34,12 @@ jobs:
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=88 --statistics --exclude="examples/* docs/* build/* utils.py"

testing_unix:
test_os:
needs: [code_lint]
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.9, "3.10", "3.11"]

steps:
- name: Checkout
uses: actions/checkout@v4

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools
pip install -r requirements-dev.txt

- name: Build and install package from source
run: python setup.py develop

- name: Test with pytest
run: pytest --cov=./ --cov-report=xml

- name: Upload coverage
uses: codecov/codecov-action@v4.5.0

# test-windows:
# needs: [code_lint]
# runs-on: "windows-latest"
# strategy:
# matrix:
# python-version: ["3.10", "3.11"]

# steps:
# - name: Checkout
# uses: actions/checkout@v4

# - name: Set up Python ${{ matrix.python-version }}
# uses: actions/setup-python@v5
# with:
# python-version: ${{ matrix.python-version }}

# - name: Install dependencies
# run: |
# C:\msys64\pacman -S mingw-w64-x86_64-openblas
# python -m pip install --upgrade pip
# pip install setuptools
# pip install -r requirements-dev.txt

# - name: Build and install package from source
# run: python setup.py develop

# - name: Test with pytest
# run: pytest --cov=./ --cov-report=xml

# - name: Upload coverage
# uses: codecov/codecov-action@v4.5.0

testing_mac_intel:
needs: [code_lint]
runs-on: "macos-13"
runs-on: "ubuntu-latest"
strategy:
matrix:
python-version: [3.9, "3.10", "3.11"]
python-version: ["3.10", "3.11", "3.12", "3.13"]

steps:
- name: Checkout
Expand All @@ -115,7 +52,6 @@ jobs:

- name: Install dependencies
run: |
brew install gfortran
python -m pip install --upgrade pip
pip install setuptools
pip install -r requirements-dev.txt
Expand Down
2 changes: 1 addition & 1 deletion examples/MAF/plot_2d_4_MgO.SiO2.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ def plot2D(csdm_object, **kwargs):
# Once the ShieldingPALineshape instance is created, use the
# :meth:`~mrinversion.kernel.nmr.ShieldingPALineshape.kernel` method of the
# instance to generate the MAF line-shape kernel.
K = lineshape.kernel(supersampling=1)
K = lineshape.kernel(supersampling=1).real
print(K.shape)

# %%
Expand Down
10 changes: 5 additions & 5 deletions mrinversion/linear_model/_base_l1l2.py
Original file line number Diff line number Diff line change
Expand Up @@ -523,18 +523,18 @@ def cross_validation_curve(self):

def cv(l1, X, y, cv):
"""Return the cross-validation score as negative of mean square error."""
if isinstance(l1, (Lasso, MultiTaskLasso)):
fit_params = {"check_input": False}
if isinstance(l1, LassoLars):
fit_params = None # {"Xy": np.dot(X.T, y)}
# if isinstance(l1, (Lasso, MultiTaskLasso)):
# fit_params = {"check_input": False}
# if isinstance(l1, LassoLars):
# fit_params = None # {"Xy": np.dot(X.T, y)}

cv_score = cross_validate(
l1,
X=X,
y=y,
scoring="neg_mean_squared_error", # 'neg_mean_absolute_error",
cv=cv,
fit_params=fit_params,
# fit_params=fit_params,
n_jobs=1,
verbose=0,
)
Expand Down
203 changes: 203 additions & 0 deletions mrinversion/linear_model/fista/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,203 @@
import numba as nb
import numpy as np
from numpy.linalg import norm


@nb.njit(fastmath=True)
def l1_soft_threshold(a: np.ndarray, c: float):
    """Soft threshold without non-negative constraint."""
    # sign(a) * max(|a| - c, 0): shrink magnitudes toward zero by c.
    return np.sign(a) * np.maximum(np.abs(a) - c, 0.0)


@nb.njit(fastmath=True)
def l1_soft_threshold_nonnegative(a: np.ndarray, c: float):
    """Soft threshold with non-negative constraint."""
    # max(a - c, 0): identical to (a - c + |a - c|) / 2, clipping at zero.
    return np.maximum(a - c, 0.0)


@nb.njit(fastmath=True)
def fista(
    matrix: np.ndarray,
    s: np.ndarray,
    f_k: np.ndarray,
    lam: float,
    max_iter: int,
    tol: float,
    l_inv: float,
    nonnegative: bool = False,
):
    """Minimize ``||matrix @ f - s||^2 + lam * ||f||_1`` with FISTA
    (Beck & Teboulle's fast iterative shrinkage-thresholding algorithm).

    Parameters
    ----------
    matrix : forward/kernel matrix.
    s : observed signal.
    f_k : initial solution; updated in place and also returned.
    lam : l1 regularization strength.
    max_iter : maximum number of iterations.
    tol : relative tolerance on the trailing-average residue used to stop.
    l_inv : gradient step size (presumably 1/L, with L an upper bound on the
        largest eigenvalue of ``matrix.T @ matrix`` — TODO confirm at caller).
    nonnegative : when True, constrain the solution to ``f >= 0``.

    Returns
    -------
    zf : ``matrix @ f_k``, the model signal of the final solution.
    f_k : final solution.
    residue : normalized squared residue per executed iteration.
    data_consistency : objective value per executed iteration.
    n_iter : index of the last executed iteration.
    """
    matrix = np.asarray(matrix)
    s = np.asarray(s)

    residue = np.zeros(max_iter)
    data_consistency = np.zeros(max_iter)

    # Loop invariants of the gradient step: Gram matrix and the correlation
    # of the kernel with the data.
    gram = matrix.T @ matrix
    c = matrix.T @ s

    normalization_factor = norm(s) ** 2
    # Objective of the zero solution; baseline for the monotonicity check.
    last_data_consistency = float(normalization_factor)

    last_fk_l1 = 0.0
    t_kp1 = 1.0
    y_k = f_k.copy()

    # `n_iter` replaces the original local `iter` (shadowed the builtin) and
    # is pre-initialized so the slicing below is valid even if max_iter == 0.
    n_iter = 0
    for k in range(max_iter):
        n_iter = k
        t_k = t_kp1
        f_km1 = f_k.copy()

        # FISTA momentum schedule.
        t_kp1 = (1.0 + np.sqrt(1.0 + 4.0 * t_k**2)) * 0.5
        constant_factor = (t_k - 1.0) / t_kp1

        # Gradient step on the smooth part: y_k + l_inv * (c - gram @ y_k).
        temp_c = gram @ y_k
        temp_c = l_inv * (c - temp_c)
        temp_c += y_k

        # Proximal step: soft threshold, optionally with f >= 0 constraint.
        if nonnegative:
            f_k[:] = l1_soft_threshold_nonnegative(temp_c, lam * l_inv)
        else:
            f_k[:] = l1_soft_threshold(temp_c, lam * l_inv)

        temp = (matrix @ f_k) - s
        residue_temp = norm(temp) ** 2
        fk_l1 = np.sum(np.abs(f_k))

        residue[k] = residue_temp
        data_consistency[k] = residue[k] + lam * fk_l1
        residue[k] = residue[k] / normalization_factor

        # Stop when the trailing 5-iteration average residue is within tol
        # of the current residue.
        if k >= 5:
            recent_avg = np.mean(residue[k - 5 : k])
            if abs(1.0 - (recent_avg / residue[k])) <= tol:
                break

        # Monotonicity safeguard: if the objective increased, roll back to
        # the previous iterate while keeping the momentum schedule running.
        # NOTE(review): at k == 0 the rollback reads index k - 1 == -1, i.e.
        # the (zero-initialized) last array slot — confirm this is intended.
        if data_consistency[k] > last_data_consistency:
            f_k[:] = f_km1.copy()
            fk_l1 = last_fk_l1
            data_consistency[k] = data_consistency[k - 1]
            residue[k] = residue[k - 1]

        last_data_consistency = data_consistency[k]
        last_fk_l1 = fk_l1

        # Momentum extrapolation for the next gradient step.
        y_k = f_k + constant_factor * (f_k - f_km1)

    zf = matrix @ f_k

    return zf, f_k, residue[: n_iter + 1], data_consistency[: n_iter + 1], n_iter


@nb.njit(fastmath=True)
def fista_cv_nb(
    matrix: np.ndarray,
    s: np.ndarray,
    matrix_test: np.ndarray,
    s_test: np.ndarray,
    max_iter: int,
    lambda_vals: np.ndarray,
    nonnegative: bool,
    l_inv: float,
    tol: float,
):
    """Compiled FISTA cross-validation kernel.

    For every fold (stacked along the last axis of the inputs) and every
    regularization value in `lambda_vals`, run FISTA on the training pair
    (`matrix[..., fold]`, `s[..., fold]`) and record the per-point squared
    prediction error on the held-out pair
    (`matrix_test[..., fold]`, `s_test[..., fold]`).

    Returns
    -------
    prediction_error : (n_lambda, n_fold) array of test errors, each
        normalized by the number of test points.
    iter_arr : (n_lambda,) index of the last FISTA iteration executed per
        lambda (overwritten each fold, so it reflects the final fold).
    """
    n_fold = matrix.shape[-1]  # folds are stacked along the last axis
    n_lambda = len(lambda_vals)
    n_targets = s.shape[1]
    n_features = matrix.shape[1]
    prediction_error = np.zeros((n_lambda, n_fold))
    iter_arr = np.zeros(n_lambda)

    # Scratch buffers, reused (and re-zeroed) for every (fold, lambda) pair.
    residue = np.zeros(max_iter)
    data_consistency = np.zeros(max_iter)

    for fold in range(n_fold):
        x_train = matrix[..., fold]
        y_train = s[..., fold]
        x_test = matrix_test[..., fold]
        y_test = s_test[..., fold]
        y_points = y_test.shape[0] * y_test.shape[1]  # test-point count

        # Loop invariants of the gradient step for this fold.
        gradient = x_train.T @ x_train  # Gram matrix A.T @ A
        c = x_train.T @ y_train

        norm_factor = norm(y_train) ** 2
        f_k = np.zeros((n_features, n_targets))
        y_k = f_k.copy()

        for j, lam in enumerate(lambda_vals):
            # Reset FISTA state so each lambda starts from the zero solution.
            t_kp1 = 1.0
            y_k[:] = 0.0
            f_k[:] = 0.0
            residue[:] = 0
            fk_l1 = 0.0
            last_fk_l1 = 0.0
            data_consistency[:] = 0
            # Objective of the zero solution; monotonicity-check baseline.
            last_data_consistency = norm_factor

            for k in range(max_iter):
                t_k = t_kp1
                f_km1 = f_k.copy()
                # FISTA momentum schedule.
                t_kp1 = (1.0 + np.sqrt(1.0 + 4.0 * t_k**2)) / 2.0
                constant_factor = (t_k - 1.0) / t_kp1

                # Gradient step on the smooth part.
                grad_yk = gradient @ y_k
                temp_c = y_k - l_inv * (grad_yk - c)

                # Proximal step: soft threshold, optionally with f >= 0.
                if nonnegative:
                    f_k[:] = l1_soft_threshold_nonnegative(temp_c, l_inv * lam)
                else:
                    f_k[:] = l1_soft_threshold(temp_c, l_inv * lam)

                residue[k] = norm(x_train @ f_k - y_train) ** 2
                fk_l1 = np.sum(np.abs(f_k))
                data_consistency[k] = residue[k] + lam * fk_l1

                # Stop when the trailing 5-iteration average residue is
                # within tol of the current residue.
                if k >= 5:
                    recent_avg = np.mean(residue[k - 5 : k])
                    if abs(1.0 - (recent_avg / residue[k])) <= tol:
                        break

                # Monotonicity safeguard: roll back if the objective rose.
                # NOTE(review): at k == 0 this reads index k - 1 == -1, the
                # re-zeroed last slot of the scratch arrays — confirm intended.
                if data_consistency[k] > last_data_consistency:
                    f_k[:] = f_km1.copy()
                    fk_l1 = last_fk_l1
                    data_consistency[k] = data_consistency[k - 1]
                    residue[k] = residue[k - 1]

                last_data_consistency = data_consistency[k]
                last_fk_l1 = fk_l1

                # Momentum extrapolation for the next gradient step.
                y_k = f_k + constant_factor * (f_k - f_km1)

            # Held-out squared error, normalized per test point.
            err = np.linalg.norm(x_test @ f_k - y_test) ** 2
            prediction_error[j, fold] = err / y_points
            iter_arr[j] = k

    return prediction_error, iter_arr


def fista_cv(
    matrix: np.ndarray,
    s: np.ndarray,
    matrix_test: np.ndarray,
    s_test: np.ndarray,
    max_iter: int,
    lambda_vals: np.ndarray,
    nonnegative: bool,
    l_inv: float,
    tol: float,
):
    """Cross-validate FISTA over a grid of regularization values.

    Thin wrapper around the compiled kernel ``fista_cv_nb``: runs it once,
    then reduces the per-fold prediction errors to a mean curve and its
    per-lambda standard deviation across folds.
    """
    per_fold_error, iterations = fista_cv_nb(
        matrix,
        s,
        matrix_test,
        s_test,
        max_iter,
        lambda_vals,
        nonnegative,
        l_inv,
        tol,
    )

    mean_curve = per_fold_error.mean(axis=1)
    std_curve = per_fold_error.std(axis=1)
    return mean_curve, std_curve, per_fold_error, iterations
Loading