import torch
from torch import as_tensor
import torchmetrics.functional
from torch.nn.functional import binary_cross_entropy, cross_entropy, nll_loss
import torch.nn.functional as F
import numpy as np

class Metric:
    """
    Base class for metrics.
    General usage: `metric(y_label=y_label, y_pred=y_pred)`
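
    To add a new metric, subclass `Metric` and implement `__call__` with the
    keyword-only `y_label` / `y_pred` signature. An illustrative sketch (the
    `MeanAbsoluteError` class below is only an example, not part of this module):

    >>> class MeanAbsoluteError(Metric):
    ...     def __init__(self):
    ...         super(MeanAbsoluteError, self).__init__(name='MAE')
    ...     def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
    ...         return (y_pred - y_label.float()).abs().mean()
    >>> np.isclose(MeanAbsoluteError()(y_label=as_tensor([0, 1]), y_pred=as_tensor([0.5, 0.5])).numpy(), 0.5)
    True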
"""
def __init__(self, name):
self.name = name
def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
raise NotImplementedError
class AUCBinary(Metric):
"""
>>> from sklearn.metrics import roc_auc_score
>>> target = np.array([0, 0, 1, 1, 1], dtype=np.float32)
>>> preds_prob = np.array([0.13, 0.26, 0.95, 0.19, 0.34], dtype=np.float32)
>>> round(roc_auc_score(y_true=target, y_score=preds_prob), 5)
0.83333
>>> AUCBinary(pos_label=1)(y_label=as_tensor(target), y_pred=as_tensor(preds_prob))
tensor(0.8333)
>>> round(roc_auc_score(y_true=[1, 1, 0, 0, 0], y_score=preds_prob), 5)
0.16667
>>> AUCBinary(pos_label=0)(y_label=as_tensor(target), y_pred=as_tensor(preds_prob))
tensor(0.1667)
"""
    def __init__(self, pos_label=1):
        super(AUCBinary, self).__init__(name='AUC')
        self.pos_label = pos_label

    def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
        return torchmetrics.functional.auroc(preds=y_pred, target=y_label.int(), pos_label=self.pos_label)

class AccuracyBinary(Metric):
"""
>>> target = [0, 0, 1, 1, 1]
>>> preds_label = [0, 0, 1, 0, 0]
>>> from sklearn.metrics import accuracy_score
>>> round(accuracy_score(y_true=target, y_pred=preds_label), 5)
0.6
>>> np.isclose(AccuracyBinary()(y_label=as_tensor(target), y_pred=as_tensor(preds_label)).numpy(), 0.6)
True
>>> preds_prob = as_tensor([0.2, 0.2, 0.49, 0.2, 0.2])
>>> np.isclose(AccuracyBinary(0.5)(y_label=as_tensor(target), y_pred=preds_prob).numpy(), 0.4)
True
>>> np.isclose(AccuracyBinary(0.4)(y_label=as_tensor(target), y_pred=preds_prob).numpy(), 0.6)
True
"""
    def __init__(self, threshold=0.5):
        super(AccuracyBinary, self).__init__('Accuracy')
        self.threshold = threshold

    def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
        return torchmetrics.functional.accuracy(preds=y_pred, target=y_label.int(), threshold=self.threshold)

class AUCMulticlass(Metric):
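    """
    AUROC for multiclass problems (untested; see the warning below).

    Hedged usage sketch, assuming the pre-0.11 `torchmetrics.functional.auroc`
    signature used in `__call__`: `y_pred` holds per-class probabilities of shape
    (N, num_classes) and `y_label` holds integer class indices. The output below
    is not verified against this package, hence the skipped doctest.

    >>> preds = as_tensor([[0.9, 0.05, 0.05], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6]])
    >>> labels = torch.tensor([0, 1, 2])
    >>> AUCMulticlass(num_classes=3)(y_label=labels, y_pred=preds)  # doctest: +SKIP
    tensor(1.)
    """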
    def __init__(self, num_classes):
        import warnings
        warnings.warn('AUCMulticlass has not been tested.', UserWarning)
        super(AUCMulticlass, self).__init__(name='AUC')
        self.num_classes = num_classes

    def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
        return torchmetrics.functional.auroc(preds=y_pred, target=y_label, num_classes=self.num_classes)

class AccuracyMulticlass(Metric):
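    """
    Accuracy for multiclass problems (untested; see the warning below).

    Hedged usage sketch, assuming the pre-0.11 `torchmetrics.functional.accuracy`
    signature used in `__call__`: `y_pred` holds per-class probabilities of shape
    (N, num_classes) and `y_label` holds integer class indices. The output below
    is not verified against this package, hence the skipped doctest.

    >>> preds = as_tensor([[0.9, 0.05, 0.05], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6]])
    >>> labels = torch.tensor([0, 1, 2])
    >>> AccuracyMulticlass(num_classes=3)(y_label=labels, y_pred=preds)  # doctest: +SKIP
    tensor(1.)
    """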
    def __init__(self, num_classes):
        import warnings
        warnings.warn('AccuracyMulticlass has not been tested.', UserWarning)
        super(AccuracyMulticlass, self).__init__(name='Accuracy')
        self.num_classes = num_classes

    def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
        return torchmetrics.functional.accuracy(preds=y_pred, target=y_label, num_classes=self.num_classes)

class BinaryCrossEntropy(Metric):
"""
>>> preds_prob = [0.13, 0.26, 0.95, 0.19, 0.34]
>>> target = [0, 0, 1, 1, 1]
>>> from sklearn.metrics import log_loss
>>> round(log_loss(y_true=target, y_pred=preds_prob), 10)
0.6462402645
>>> np.isclose(BinaryCrossEntropy()(y_label=as_tensor(target), y_pred=as_tensor(preds_prob)).numpy(), 0.64624023)
True
"""
    def __init__(self):
        super(BinaryCrossEntropy, self).__init__('BinaryCrossEntropy')

    def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
        return binary_cross_entropy(input=y_pred, target=y_label.float())

class CrossEntropy(Metric):
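    """
    Multiclass cross-entropy. `torch.nn.functional.cross_entropy` expects raw,
    unnormalised logits of shape (N, C) in `y_pred` (log-softmax is applied
    internally) and integer class indices in `y_label`.

    Illustrative example (added here, not from the original test suite): with
    uniform logits the loss equals log(C).

    >>> logits = torch.zeros(2, 3)
    >>> labels = torch.tensor([0, 1])
    >>> np.isclose(CrossEntropy()(y_label=labels, y_pred=logits).numpy(), np.log(3))
    True
    """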
    def __init__(self):
        super(CrossEntropy, self).__init__('CrossEntropy')

    def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
        return cross_entropy(input=y_pred, target=y_label)

class NLLLoss(Metric):
"""
input is of size N x C = 3 x 5,
each element in target has to have 0 <= value < C
>>> rng = np.random.default_rng(seed=3)
>>> input = as_tensor(rng.uniform(size=(3, 5)))
>>> target = torch.tensor([1, 0, 4])
>>> np.isclose(NLLLoss()(y_pred=F.log_softmax(input, dim=1), y_label=target).numpy(), 1.59098092)
True
>>> input = as_tensor(rng.uniform(size=(4, 2)))
>>> target = torch.tensor([0, 1, 0, 1])
>>> np.isclose(NLLLoss()(y_pred=F.log_softmax(input, dim=1), y_label=target).numpy(), 0.68170622)
True
>>> target = torch.tensor([0, 1, 0, 1], dtype=torch.float32)
>>> np.isclose(NLLLoss()(y_pred=F.log_softmax(input, dim=1), y_label=target).numpy(), 0.68170622)
True
"""
    def __init__(self):
        super(NLLLoss, self).__init__('NLLLoss')

    def __call__(self, *, y_label: torch.Tensor, y_pred: torch.Tensor):
        return nll_loss(input=y_pred, target=y_label.long())
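
if __name__ == '__main__':
    # Optional doctest runner (a convenience sketch, not part of the original tests).
    # The comparison doctests above assume scikit-learn is installed.
    import doctest
    doctest.testmod()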