# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.

"""Inception Score (IS) from the paper "Improved techniques for training
GANs". Matches the original implementation by Salimans et al. at
https://github.com/openai/improved-gan/blob/master/inception_score/model.py"""

import numpy as np
from . import metric_utils

#----------------------------------------------------------------------------

def compute_is(opts, num_gen, num_splits):
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    # detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl'
    detector_url = 'file:///home/tiger/nfs/myenv/cache/useful_ckpts/inception-2015-12-05.pkl'
    detector_kwargs = dict(no_output_bias=True) # Match the original implementation by not applying bias in the softmax layer.
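
    # Run the generated images through the Inception detector and keep every
    # per-image softmax output (capture_all=True) rather than only running
    # mean/covariance statistics; get_all() returns them as one [N, C] array.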
    gen_probs = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        capture_all=True, max_items=num_gen).get_all()

    if opts.rank != 0:
        return float('nan'), float('nan')
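
    # Inception Score: IS = exp( E_x[ KL( p(y|x) || p(y) ) ] ), where p(y|x) is
    # the detector's class distribution for one image and p(y) is the marginal
    # distribution over a split. The score is computed on num_splits disjoint
    # splits and reported as mean and standard deviation across splits.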
    scores = []
    for i in range(num_splits):
        part = gen_probs[i * num_gen // num_splits : (i + 1) * num_gen // num_splits]
        kl = part * (np.log(part) - np.log(np.mean(part, axis=0, keepdims=True)))
        kl = np.mean(np.sum(kl, axis=1))
        scores.append(np.exp(kl))
    return float(np.mean(scores)), float(np.std(scores))

#----------------------------------------------------------------------------
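# Usage sketch (illustrative only, not part of this module): the fields of
# `opts` are assumed to follow metric_utils.MetricOptions from the standard
# StyleGAN3 metrics package, and num_gen=50000 / num_splits=10 mirror the
# usual "is50k" configuration; G and dataset_kwargs are placeholders.
#
#   import torch
#   opts = metric_utils.MetricOptions(
#       G=G, dataset_kwargs=dataset_kwargs,   # assumed: generator + dataset config
#       num_gpus=1, rank=0, device=torch.device('cuda'))
#   is_mean, is_std = compute_is(opts, num_gen=50000, num_splits=10)
#   print(f'IS: {is_mean:.3f} +- {is_std:.3f}')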