
fusion_bench.metrics

NYUv2 Tasks

fusion_bench.metrics.nyuv2

metric_classes = {'segmentation': SegmentationMetric, 'depth': DepthMetric, 'normal': NormalMetric, 'noise': NoiseMetric} module-attribute
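
For reference, a minimal usage sketch of this mapping (assuming metric_classes is importable directly from fusion_bench.metrics.nyuv2):

# Hypothetical snippet: look up the metric class registered for a task and instantiate it.
from fusion_bench.metrics.nyuv2 import metric_classes

seg_metric = metric_classes["segmentation"]()  # SegmentationMetric with the default 13 classes
depth_metric = metric_classes["depth"]()       # DepthMetric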

SegmentationMetric

Bases: Metric

Source code in fusion_bench/metrics/nyuv2/segmentation.py
class SegmentationMetric(Metric):
    metric_names = ["mIoU", "pixAcc"]

    def __init__(self, num_classes=13):
        super().__init__()

        self.num_classes = num_classes
        self.add_state(
            "record",
            default=torch.zeros(
                (self.num_classes, self.num_classes), dtype=torch.int64
            ),
            dist_reduce_fx="sum",
        )

    def reset(self):
        self.record.zero_()

    def update(self, preds: Tensor, target: Tensor):
        preds = preds.softmax(1).argmax(1).flatten()
        target = target.long().flatten()

        k = (target >= 0) & (target < self.num_classes)
        inds = self.num_classes * target[k].to(torch.int64) + preds[k]
        self.record += torch.bincount(inds, minlength=self.num_classes**2).reshape(
            self.num_classes, self.num_classes
        )

    def compute(self):
        """
        return mIoU and pixel accuracy
        """
        h = cast(Tensor, self.record).float()
        iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
        acc = torch.diag(h).sum() / h.sum()
        return [torch.mean(iu), acc]
compute()

return mIoU and pixel accuracy

Source code in fusion_bench/metrics/nyuv2/segmentation.py
def compute(self):
    """
    return mIoU and pixel accuracy
    """
    h = cast(Tensor, self.record).float()
    iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
    acc = torch.diag(h).sum() / h.sum()
    return [torch.mean(iu), acc]
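
For reference, a minimal usage sketch of SegmentationMetric (assuming it is importable from fusion_bench/metrics/nyuv2/segmentation.py as shown above). Predictions are per-pixel class logits of shape (batch, num_classes, H, W), targets are integer label maps of shape (batch, H, W), and pixels whose label falls outside [0, num_classes) are ignored:

import torch
from fusion_bench.metrics.nyuv2.segmentation import SegmentationMetric

metric = SegmentationMetric(num_classes=13)

# Fake batch: per-pixel class logits and integer ground-truth labels.
preds = torch.randn(4, 13, 32, 32)           # (batch, num_classes, H, W)
target = torch.randint(0, 13, (4, 32, 32))   # (batch, H, W)

metric.update(preds, target)
miou, pix_acc = metric.compute()  # [mIoU, pixel accuracy]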

DepthMetric

Bases: Metric

Source code in fusion_bench/metrics/nyuv2/depth.py
class DepthMetric(Metric):
    metric_names = ["abs_err", "rel_err"]

    def __init__(self):
        super().__init__()

        self.add_state("abs_record", default=[], dist_reduce_fx="cat")
        self.add_state("rel_record", default=[], dist_reduce_fx="cat")
        self.add_state("batch_size", default=[], dist_reduce_fx="cat")

    def reset(self):
        self.abs_record = []
        self.rel_record = []
        self.batch_size = []

    def update(self, preds: Tensor, target: Tensor):
        binary_mask = (torch.sum(target, dim=1) != 0).unsqueeze(1)
        preds = preds.masked_select(binary_mask)
        target = target.masked_select(binary_mask)
        abs_err = torch.abs(preds - target)
        rel_err = torch.abs(preds - target) / target
        abs_err = torch.sum(abs_err) / torch.nonzero(binary_mask, as_tuple=False).size(
            0
        )
        rel_err = torch.sum(rel_err) / torch.nonzero(binary_mask, as_tuple=False).size(
            0
        )
        self.abs_record.append(abs_err)
        self.rel_record.append(rel_err)
        self.batch_size.append(torch.asarray(preds.size(0), device=preds.device))

    def compute(self):
        records = torch.stack(
            [torch.stack(self.abs_record), torch.stack(self.rel_record)]
        )
        batch_size = torch.stack(self.batch_size)
        return [(records[i] * batch_size).sum() / batch_size.sum() for i in range(2)]
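
For reference, a minimal usage sketch of DepthMetric (assuming it is importable from fusion_bench/metrics/nyuv2/depth.py as shown above). Both predictions and targets are depth maps of shape (batch, 1, H, W); pixels whose target is zero are treated as invalid and masked out, so the fake targets here are kept strictly positive:

import torch
from fusion_bench.metrics.nyuv2.depth import DepthMetric

metric = DepthMetric()

# Fake depth maps; strictly positive targets keep the relative error finite.
target = torch.rand(4, 1, 32, 32) + 0.5
preds = target + 0.1 * torch.randn_like(target)

metric.update(preds, target)
abs_err, rel_err = metric.compute()  # [absolute error, relative error]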

NormalMetric

Bases: Metric

Source code in fusion_bench/metrics/nyuv2/normal.py
class NormalMetric(Metric):
    metric_names = ["mean", "median", "<11.25", "<22.5", "<30"]

    def __init__(self):
        super(NormalMetric, self).__init__()

        self.add_state("record", default=[], dist_reduce_fx="cat")

    def update(self, preds, target):
        # gt has been normalized on the NYUv2 dataset
        preds = preds / torch.norm(preds, p=2, dim=1, keepdim=True)
        binary_mask = torch.sum(target, dim=1) != 0
        error = (
            torch.acos(
                torch.clamp(
                    torch.sum(preds * target, 1).masked_select(binary_mask), -1, 1
                )
            )
            .detach()
            .cpu()
            .numpy()
        )
        error = np.degrees(error)
        self.record.append(torch.from_numpy(error))

    def compute(self):
        """
        returns mean, median, and percentage of pixels with error less than 11.25, 22.5, and 30 degrees ("mean", "median", "<11.25", "<22.5", "<30")
        """
        if self.record is None:
            return torch.asarray([0.0, 0.0, 0.0, 0.0, 0.0])

        records = torch.concatenate(self.record)
        return [
            torch.mean(records),
            torch.median(records),
            torch.mean((records < 11.25) * 1.0),
            torch.mean((records < 22.5) * 1.0),
            torch.mean((records < 30) * 1.0),
        ]
compute()

returns mean, median, and percentage of pixels with error less than 11.25, 22.5, and 30 degrees ("mean", "median", "<11.25", "<22.5", "<30")

Source code in fusion_bench/metrics/nyuv2/normal.py
def compute(self):
    """
    returns mean, median, and percentage of pixels with error less than 11.25, 22.5, and 30 degrees ("mean", "median", "<11.25", "<22.5", "<30")
    """
    if self.record is None:
        return torch.asarray([0.0, 0.0, 0.0, 0.0, 0.0])

    records = torch.concatenate(self.record)
    return [
        torch.mean(records),
        torch.median(records),
        torch.mean((records < 11.25) * 1.0),
        torch.mean((records < 22.5) * 1.0),
        torch.mean((records < 30) * 1.0),
    ]
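
For reference, a minimal usage sketch of NormalMetric (assuming it is importable from fusion_bench/metrics/nyuv2/normal.py as shown above). Targets are unit-normalized surface normals of shape (batch, 3, H, W); predictions are normalized inside update, and pixels whose target sums to zero are masked out:

import torch
import torch.nn.functional as F
from fusion_bench.metrics.nyuv2.normal import NormalMetric

metric = NormalMetric()

# Fake surface normals; targets are unit vectors along the channel dimension.
target = F.normalize(torch.randn(4, 3, 32, 32), dim=1)
preds = target + 0.05 * torch.randn_like(target)

metric.update(preds, target)
mean_err, median_err, within_11, within_22, within_30 = metric.compute()  # angular errors in degrees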

NoiseMetric

Bases: Metric

Source code in fusion_bench/metrics/nyuv2/noise.py
class NoiseMetric(Metric):
    def __init__(self):
        super().__init__()

    def update(self, preds: Tensor, target: Tensor):
        pass

    def compute(self):
        return [1]

Continual Learning Metrics

fusion_bench.metrics.continual_learning

compute_backward_transfer(acc_Ti, acc_ii)

Compute the backward transfer (BWT) of a model on a set of tasks.

Equation

\(BWT = \frac{1}{n} \sum_{i=1}^{n} \left( acc_{T,i} - acc_{i,i} \right)\), where the sum runs over the \(n\) tasks, \(acc_{T,i}\) is the accuracy on task \(i\) after training on the final task \(T\), and \(acc_{i,i}\) is the accuracy on task \(i\) immediately after it was learned.

Returns:

  • float – The backward transfer of the model.

Source code in fusion_bench/metrics/continual_learning/backward_transfer.py
def compute_backward_transfer(
    acc_Ti: Dict[str, float], acc_ii: Dict[str, float]
) -> float:
    R"""
    Compute the backward transfer (BWT) of a model on a set of tasks.

    Equation:
        $BWT = \frac{1}{n} \sum_{i=1}^{n} (acc_{T,i} - acc_{i,i})$

    Args:
        acc_Ti (Dict[str, float]): accuracy on each task after training on the final task.
        acc_ii (Dict[str, float]): accuracy on each task immediately after it was learned.

    Returns:
        float: The backward transfer of the model.
    """
    assert set(acc_ii.keys()) == set(acc_Ti.keys())
    bwt = 0
    for task_name in acc_ii:
        bwt += acc_Ti[task_name] - acc_ii[task_name]
    return bwt / len(acc_ii)
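
For illustration, a small worked example with two tasks and hypothetical accuracies (assuming the function is importable from fusion_bench/metrics/continual_learning/backward_transfer.py as shown above):

from fusion_bench.metrics.continual_learning.backward_transfer import compute_backward_transfer

# Hypothetical numbers: accuracy on each task right after it was learned ...
acc_ii = {"task_a": 0.80, "task_b": 0.75}
# ... and accuracy on the same tasks after training on the final task.
acc_Ti = {"task_a": 0.70, "task_b": 0.75}

bwt = compute_backward_transfer(acc_Ti, acc_ii)
# ((0.70 - 0.80) + (0.75 - 0.75)) / 2 = -0.05; a negative BWT indicates forgetting.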