Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 4 additions & 8 deletions src/squlearn/qnn/loss/cross_entropy_loss.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Log Loss for QNNs."""

from typing import Union
from typing import Union, Optional

import numpy as np

Expand Down Expand Up @@ -31,7 +31,7 @@ def gradient_args_tuple(self) -> tuple:
return ("f", "dfdp", "dfdop")
return ("f", "dfdp")

def value(self, value_dict: dict, **kwargs) -> float:
def value(self, value_dict: dict, ground_truth: np.ndarray, weights: Optional[np.ndarray] = None) -> float:
r"""Calculates the cross entropy loss.

This function calculates the cross entropy loss between the probability values in
Expand All @@ -44,17 +44,13 @@ def value(self, value_dict: dict, **kwargs) -> float:
Args:
value_dict (dict): Contains calculated values of the model
ground_truth (np.ndarray): The true values :math:`y\left(x_i\right)`
weights (np.ndarray): Weight for each data point, if None all data points count the
weights (Optional[np.ndarray]): Weight for each data point, if None all data points count the
same

Returns:
Loss value
"""
if "ground_truth" not in kwargs:
raise AttributeError("CrossEntropyLoss requires ground_truth.")

ground_truth = kwargs["ground_truth"]
weights = kwargs.get("weights") or np.ones_like(ground_truth)
weights = weights or np.ones_like(ground_truth)

probability_values = np.clip(value_dict["f"], self._eps, 1.0 - self._eps)
if probability_values.ndim == 1:
Expand Down
11 changes: 4 additions & 7 deletions src/squlearn/qnn/loss/mean_squared_error.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""MSE for QNNs."""

from typing import Union
from typing import Union, Optional

import numpy as np

Expand Down Expand Up @@ -32,7 +32,7 @@ def gradient_args_tuple(self) -> tuple:
return ("f", "dfdp", "dfdop")
return ("f", "dfdp")

def value(self, value_dict: dict, **kwargs) -> float:
def value(self, value_dict: dict, ground_truth: np.ndarray, weights: Optional[np.ndarray] = None) -> float:
r"""Calculates the mean squared error.

This function calculates the mean squared error between the values in `value_dict` and
Expand All @@ -44,16 +44,13 @@ def value(self, value_dict: dict, **kwargs) -> float:
Args:
value_dict (dict): Contains calculated values of the model
ground_truth (np.ndarray): The true values :math:`y\left(x_i\right)`
weights (np.ndarray): Weight for each data point, if None all data points count the
weights (Optional[np.ndarray]): Weight for each data point, if None all data points count the
same

Returns:
Loss value
"""
if "ground_truth" not in kwargs:
raise AttributeError("SquaredLoss requires ground_truth.")
ground_truth = kwargs["ground_truth"]
if "weights" in kwargs and kwargs["weights"] is not None:
if weights:
raise ValueError("Weights are not supported for MeanSquaredError.")
return np.sum(np.square(value_dict["f"] - ground_truth)) / len(ground_truth)

Expand Down
13 changes: 5 additions & 8 deletions src/squlearn/qnn/loss/ode_loss.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""ODE Loss for QNNs."""

from typing import Union
from typing import Union, Optional

import numpy as np
import sympy as sp
Expand Down Expand Up @@ -204,7 +204,7 @@ def _derivatives_in_array_format(self, loss_values):
loss_values["dfdxdx"][:, 0, 0],
)

def value(self, value_dict: dict, **kwargs) -> float:
def value(self, value_dict: dict, ground_truth: np.ndarray, weights: Optional[np.ndarray]) -> float:
r"""
Calculates the squared loss of the loss function for the ODE as

Expand All @@ -219,18 +219,15 @@ def value(self, value_dict: dict, **kwargs) -> float:
Args:
value_dict (dict): Contains calculated values of the model
ground_truth (np.ndarray): The true values :math:`f_{ref}\left(x_i\right)`
weights (np.ndarray): Weight for each data point, if None all data points
weights (Optional[np.ndarray]): Weight for each data point, if None all data points
count the same

Returns:
Loss value
"""
if "ground_truth" not in kwargs:
raise AttributeError("SquaredLoss requires ground_truth.")
ground_truth = kwargs["ground_truth"]
weights = kwargs.get("weights") or np.ones_like(ground_truth)
weights = weights or np.ones_like(ground_truth)

multiple_output = "multiple_output" in kwargs and kwargs["multiple_output"]
multiple_output = None # Not used in this implementation

functional_loss, initial_value_loss_f, initial_value_loss_df = 0, 0, 0

Expand Down
10 changes: 5 additions & 5 deletions src/squlearn/qnn/loss/parameter_regularization_loss.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
"""Parameter Regularization Loss for QNNs."""

from collections.abc import Callable
from typing import Union
from typing import Union, Optional

import numpy as np

Expand Down Expand Up @@ -60,7 +60,7 @@ def gradient_args_tuple(self) -> tuple:
"""Returns evaluation tuple for loss gradient calculation."""
return tuple()

def value(self, value_dict: dict, **kwargs) -> float:
def value(self, value_dict: dict, iteration: Optional[int]) -> float:
r"""Returns the variance.

This function returns the weighted variance as
Expand All @@ -70,16 +70,16 @@ def value(self, value_dict: dict, **kwargs) -> float:

Args:
value_dict (dict): Contains calculated values of the model
iteration (int): iteration number, if alpha is a callable function
iteration (Optional[int]): iteration number, if alpha is a callable function

Returns:
Loss value
"""

if callable(self._alpha):
if "iteration" not in kwargs:
if iteration is None:
raise AttributeError("If alpha is callable, iteration is required.")
alpha = self._alpha(kwargs["iteration"])
alpha = self._alpha(iteration)
else:
alpha = self._alpha

Expand Down
33 changes: 25 additions & 8 deletions src/squlearn/qnn/loss/qnn_loss_base.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
"""Loss Base Classes for QNNs."""

import abc
import inspect
from collections.abc import Callable
from typing import Union
from typing import Union, overload, Optional
import numpy as np


Expand Down Expand Up @@ -42,8 +43,18 @@ def gradient_args_tuple(self) -> tuple:
"""Returns evaluation tuple for loss gradient calculation."""
raise NotImplementedError()

# Signature for SquaredLoss, ODELoss, MeanSquaredError, CrossEntropyLoss
@overload
def value(self, value_dict: dict, ground_truth: np.ndarray, weights: Optional[np.ndarray]) -> float:
...

# Signature for VarianceLoss, ParameterRegularizationLoss
@overload
def value(self, value_dict: dict, iteration: Optional[int]) -> float:
...

@abc.abstractmethod
def value(self, value_dict: dict, **kwargs) -> float:
def value(self, **kwargs) -> float:
"""Calculates and returns the loss value."""
raise NotImplementedError()

Expand Down Expand Up @@ -196,9 +207,19 @@ def value(self, value_dict: dict, **kwargs) -> float:
Returns:
float: Composed loss value
"""
params_l1 = inspect.signature(self._l1.value).parameters
params_l2 = inspect.signature(self._l2.value).parameters

def _child_value(loss, params):
if "ground_truth" in params:
if "ground_truth" not in kwargs:
raise TypeError("ground_truth is required for this loss.")
return loss.value(value_dict, kwargs["ground_truth"], kwargs.get("weights", None))
else:
return loss.value(value_dict, kwargs.get("iteration", None))

value_l1 = self._l1.value(value_dict, **kwargs)
value_l2 = self._l2.value(value_dict, **kwargs)
value_l1 = _child_value(self._l1, params_l1)
value_l2 = _child_value(self._l2, params_l2)

if self._composition == "*":
return value_l1 * value_l2
Expand Down Expand Up @@ -333,10 +354,6 @@ def gradient_args_tuple(self) -> tuple:

def value(self, value_dict: dict, **kwargs) -> float:
"""Returns constant or iteration dependent loss value

Args:
value_dict (dict): Contains calculated values of the model
iteration (int): iteration number, if value is a callable function
"""
if callable(self._value):
if "iteration" not in kwargs:
Expand Down
11 changes: 4 additions & 7 deletions src/squlearn/qnn/loss/squared_loss.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Squared Loss for QNNs."""

from typing import Union
from typing import Union, overload, Optional

import numpy as np

Expand Down Expand Up @@ -32,7 +32,7 @@ def gradient_args_tuple(self) -> tuple:
return ("f", "dfdp", "dfdop")
return ("f", "dfdp")

def value(self, value_dict: dict, **kwargs) -> float:
def value(self, value_dict: dict, ground_truth: np.ndarray, weights: Optional[np.ndarray] = None) -> float:
r"""Calculates the squared loss.

This function calculates the squared loss between the values in `value_dict` and
Expand All @@ -44,16 +44,13 @@ def value(self, value_dict: dict, **kwargs) -> float:
Args:
value_dict (dict): Contains calculated values of the model
ground_truth (np.ndarray): The true values :math:`y\left(x_i\right)`
weights (np.ndarray): Weight for each data point, if None all data points count the
weights (Optional[np.ndarray]): Weight for each data point, if None all data points count the
same

Returns:
Loss value
"""
if "ground_truth" not in kwargs:
raise AttributeError("SquaredLoss requires ground_truth.")
ground_truth = kwargs["ground_truth"]
weights = kwargs.get("weights") or np.ones_like(ground_truth)
weights = weights or np.ones_like(ground_truth)
return np.sum(np.multiply(np.square(value_dict["f"] - ground_truth), weights))

def variance(self, value_dict: dict, **kwargs) -> float:
Expand Down
10 changes: 5 additions & 5 deletions src/squlearn/qnn/loss/variance_loss.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
"""Variance Loss for QNNs."""

from collections.abc import Callable
from typing import Union
from typing import Union, Optional

import numpy as np

Expand Down Expand Up @@ -41,7 +41,7 @@ def gradient_args_tuple(self) -> tuple:
return ("var", "dvardp", "dvardop")
return ("var", "dvardp")

def value(self, value_dict: dict, **kwargs) -> float:
def value(self, value_dict: dict, iteration: Optional[int]) -> float:
r"""Returns the variance.

This function returns the weighted variance as
Expand All @@ -51,16 +51,16 @@ def value(self, value_dict: dict, **kwargs) -> float:

Args:
value_dict (dict): Contains calculated values of the model
iteration (int): iteration number, if alpha is a callable function
iteration (Optional[int]): iteration number, if alpha is a callable function

Returns:
Loss value
"""

if callable(self._alpha):
if "iteration" not in kwargs:
if iteration is None:
raise AttributeError("If alpha is callable, iteration is required.")
alpha = self._alpha(kwargs["iteration"])
alpha = self._alpha(iteration)
else:
alpha = self._alpha

Expand Down
53 changes: 35 additions & 18 deletions src/squlearn/qnn/util/training.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,12 +298,17 @@ def _fun(theta):

loss_values = qnn.evaluate(input_values, param_, param_op_, *loss.loss_args_tuple)

loss_value = loss.value(
loss_values,
ground_truth=ground_truth,
weights=weights,
iteration=iteration,
)
if ground_truth is not None:
loss_value = loss.value(
loss_values,
ground_truth=ground_truth,
weights=weights,
)
else:
loss_value = loss.value(
loss_values,
iteration=iteration
)
return loss_value

def _grad(theta):
Expand Down Expand Up @@ -333,12 +338,19 @@ def _grad(theta):
weights=weights,
iteration=iteration,
)
loss_values = loss.value(
qnn.evaluate(input_values, param_, param_op_, *loss.loss_args_tuple),
ground_truth=ground_truth,
weights=weights,
iteration=iteration,
)

if ground_truth is not None:
loss_values = loss.value(
qnn.evaluate(input_values, param_, param_op_, *loss.loss_args_tuple),
ground_truth=ground_truth,
weights=weights,
)
else:
loss_values = loss.value(
qnn.evaluate(input_values, param_, param_op_, *loss.loss_args_tuple),
iteration=iteration,
)

shot_control.set_shots_for_grad(value=loss_values, variance=loss_variance)
else:
raise ValueError("Loss variance necessary for ShotsFromRSTD shot control")
Expand Down Expand Up @@ -474,12 +486,17 @@ def train_mini_batch(
input_values[idcs[batch_slice]], param, param_op, *loss.loss_args_tuple
)

batch_loss = loss.value(
loss_values,
ground_truth=ground_truth[idcs[batch_slice]],
weights=weights[idcs[batch_slice]] if weights is not None else None,
iteration=epoch,
)
if ground_truth is not None:
batch_loss = loss.value(
loss_values,
ground_truth=ground_truth[idcs[batch_slice]],
weights=weights[idcs[batch_slice]] if weights is not None else None,
)
else:
batch_loss = loss.value(
loss_values,
iteration=epoch,
)

accumulated_loss += batch_loss

Expand Down
4 changes: 1 addition & 3 deletions tests/qnn/loss/test_mean_squared_error.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,7 @@ def test_properties_and_requirements(self):
loss._opt_param_op = False
assert loss.gradient_args_tuple == ("f", "dfdp")

# missing ground_truth should raise for value/variance/gradient
with pytest.raises(AttributeError):
loss.value({"f": np.array([0.0])})
# missing ground_truth should raise for variance/gradient
with pytest.raises(AttributeError):
loss.variance({"f": np.array([0.0]), "var": np.array([0.0])})
with pytest.raises(AttributeError):
Expand Down
11 changes: 4 additions & 7 deletions tests/qnn/test_loss.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,14 @@
import scipy as sp
import pytest
import numpy as np
import pytest
import sympy

from squlearn.qnn import CrossEntropyLoss
from squlearn import Executor
from squlearn.encoding_circuit import ChebyshevPQC
from squlearn.observables import SummedPaulis
from squlearn.qnn.qnnr import QNNRegressor
from squlearn.optimizers import Adam, LBFGSB
from squlearn.qnn import QNNRegressor
from squlearn.qnn.util import get_lr_decay
from squlearn.optimizers import Adam
from squlearn.qnn import CrossEntropyLoss
from squlearn.qnn import ODELoss
from squlearn.qnn import QNNRegressor


class TestCrossEntropyLoss:
Expand Down
Loading