# Source code for c3.libraries.estimators

"""Collection of estimator functions, to compare two sets of (noisy) data."""

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp


# Registry mapping estimator names to their functions; populated by the
# ``estimator_reg_deco`` decorator.
estimators = {}


def estimator_reg_deco(func):
    """Register ``func`` in the module-level ``estimators`` dict, keyed by name."""
    estimators[str(func.__name__)] = func
    return func
@estimator_reg_deco
def mean_dist(exp_values, sim_values, exp_stds, shots):
    """Return the mean of the absolute differences.

    Note: the original docstring claimed "root mean squared", but this
    estimator computes the plain mean of |exp - sim|; see ``rms_dist``
    for the RMS variant.
    """
    diffs = tf.abs(tf.subtract(exp_values, sim_values))
    return tf.reduce_mean(diffs)
@estimator_reg_deco
def median_dist(exp_values, sim_values, exp_stds, shots):
    """Return the median of the absolute differences."""
    abs_errors = tf.abs(exp_values - sim_values)
    return tfp.stats.percentile(abs_errors, 50.0, interpolation="midpoint")
@estimator_reg_deco
def rms_dist(exp_values, sim_values, exp_stds, shots):
    """Return the root mean squared of the differences."""
    abs_errors = tf.abs(exp_values - sim_values)
    return tf.sqrt(tf.reduce_mean(tf.square(abs_errors)))
@estimator_reg_deco
def mean_sim_stds_dist(exp_values, sim_values, exp_stds, shots):
    """Return the mean distance measured in simulated standard deviations.

    The original docstring said "exp_stds", but the divisor is the binomial
    standard deviation derived from the *simulated* values, sqrt(p(1-p)/N).
    NOTE(review): diverges when ``sim_values`` hits exactly 0 or 1 — confirm
    callers clip probabilities.
    """
    sim_std = tf.sqrt(sim_values * (1 - sim_values) / shots)
    diffs = tf.abs(tf.subtract(exp_values, sim_values))
    return tf.reduce_mean(diffs / sim_std)
@estimator_reg_deco
def rms_sim_stds_dist(exp_values, sim_values, exp_stds, shots):
    """Return the RMS of the differences measured in simulated standard deviations.

    The original docstring said "exp_stds", but the divisor is the binomial
    standard deviation derived from the *simulated* values, sqrt(p(1-p)/N).
    NOTE(review): diverges when ``sim_values`` hits exactly 0 or 1 — confirm
    callers clip probabilities.
    """
    sim_std = tf.sqrt(sim_values * (1 - sim_values) / shots)
    diffs = tf.abs(tf.subtract(exp_values, sim_values))
    return tf.sqrt(tf.reduce_mean((diffs / sim_std) ** 2))
@estimator_reg_deco
def mean_exp_stds_dist(exp_values, sim_values, exp_stds, shots):
    """Return the mean distance measured in experimental standard deviations."""
    scaled_errors = tf.abs(exp_values - sim_values) / exp_stds
    return tf.reduce_mean(scaled_errors)
@estimator_reg_deco
def rms_exp_stds_dist(exp_values, sim_values, exp_stds, shots):
    """Return the RMS of the differences measured in experimental standard deviations."""
    scaled_errors = tf.abs(exp_values - sim_values) / exp_stds
    return tf.sqrt(tf.reduce_mean(tf.square(scaled_errors)))
@estimator_reg_deco
def std_of_diffs(exp_values, sim_values, exp_stds, shots):
    """Return the standard deviation of the absolute differences."""
    abs_errors = tf.abs(exp_values - sim_values)
    return tf.math.reduce_std(abs_errors)
@estimator_reg_deco
def neg_loglkh_binom(exp_values, sim_values, exp_stds, shots):
    """
    Average likelihood of the experimental values with binomial distribution.

    Return the likelihood of the experimental values given the simulated
    values, and given a binomial distribution function.
    """
    # Counts are recovered from the measured frequencies via exp_values * shots.
    dist = tfp.distributions.Binomial(total_count=shots, probs=sim_values)
    return -tf.reduce_mean(dist.log_prob(exp_values * shots))
@estimator_reg_deco
def neg_loglkh_binom_norm(exp_values, sim_values, exp_stds, shots):
    """
    Average likelihood of the exp values with normalised binomial distribution.

    Return the likelihood of the experimental values given the simulated
    values, and given a binomial distribution function that is normalised to
    give probability 1 at the top of the distribution.
    """
    dist = tfp.distributions.Binomial(total_count=shots, probs=sim_values)
    # Subtracting the log-probability at the mode normalises the peak to 1.
    normed = dist.log_prob(exp_values * shots) - dist.log_prob(sim_values * shots)
    return -tf.reduce_mean(normed)
@estimator_reg_deco
def neg_loglkh_gauss(exp_values, sim_values, exp_stds, shots):
    """
    Likelihood of the experimental values.

    The distribution is assumed to be binomial (approximated by a gaussian).
    """
    # Gaussian approximation of the binomial: mean p, std sqrt(p(1-p)/N).
    sigma = tf.sqrt(sim_values * (1 - sim_values) / shots)
    dist = tfp.distributions.Normal(sim_values, sigma)
    return -tf.reduce_mean(dist.log_prob(exp_values))
@estimator_reg_deco
def neg_loglkh_gauss_norm(exp_values, sim_values, exp_stds, shots):
    """
    Likelihood of the experimental values.

    The distribution is assumed to be binomial (approximated by a gaussian)
    that is normalised to give probability 1 at the top of the distribution.
    """
    sigma = tf.sqrt(sim_values * (1 - sim_values) / shots)
    dist = tfp.distributions.Normal(sim_values, sigma)
    # Subtracting the log-density at the mean normalises the peak to 1.
    normed = dist.log_prob(exp_values) - dist.log_prob(sim_values)
    return -tf.reduce_mean(normed)
@estimator_reg_deco
def neg_loglkh_gauss_norm_sum(exp_values, sim_values, exp_stds, shots):
    """
    Summed likelihood of the experimental values.

    Like ``neg_loglkh_gauss_norm`` — a binomial distribution approximated by a
    gaussian, normalised to give probability 1 at the top — but the
    log-likelihoods are summed over data points instead of averaged.
    (Docstring corrected: it was copied verbatim from the mean variant.)
    """
    std = tf.sqrt(sim_values * (1 - sim_values) / shots)
    mean = sim_values
    gauss = tfp.distributions.Normal(mean, std)
    loglkhs = gauss.log_prob(exp_values) - gauss.log_prob(mean)
    loglkh = tf.reduce_sum(loglkhs)
    return -loglkh
@estimator_reg_deco
def g_LL_prime(exp_values, sim_values, exp_stds, shots):
    """Return the mean of ((exp - sim)^2 / std^2 - 1) / 2 per data point.

    ``std`` is the binomial standard deviation sqrt(p(1-p)/N) derived from the
    simulated values. (Docstring added; the original had none.)
    """
    std = tf.sqrt(sim_values * (1 - sim_values) / shots)
    return tf.reduce_mean(((exp_values - sim_values) ** 2 / std**2 - 1) / 2)
def dv_g_LL_prime(gs, dv_gs, weights):
    """Return the weighted average of the per-dataset gradients ``dv_gs``.

    Parameters
    ----------
    gs :
        Goal-function values; accepted for signature compatibility but unused
        here (NOTE(review): confirm whether callers rely on this parameter).
    dv_gs :
        Sequence of gradient values, one per dataset.
    weights :
        Sequence of non-negative weights, same length as ``dv_gs``.

    Returns
    -------
    The sum of ``weights[i] * dv_gs[i]`` divided by ``sum(weights)``.
    """
    total_weight = np.sum(weights)
    weighted_sum = sum(w * dv for w, dv in zip(weights, dv_gs))
    return weighted_sum / total_weight
def g_LL_prime_combined(gs, weights):
    """Combine per-dataset goal values ``gs`` into one weighted average."""
    w = np.array(weights)
    return np.sum(w * gs) / np.sum(w)
@estimator_reg_deco
def neg_loglkh_multinom(exp_values, sim_values, exp_stds, shots):
    """
    Average likelihood of the experimental values with multinomial distribution.

    Return the likelihood of the experimental values given the simulated
    values, and given a multinomial distribution function.
    """
    # Flatten the per-point shot counts to the 1-D shape Multinomial expects.
    counts = tf.reshape(shots, [shots.shape[0]])
    dist = tfp.distributions.Multinomial(total_count=counts, probs=sim_values)
    return -tf.reduce_mean(dist.log_prob(exp_values * shots))
@estimator_reg_deco
def neg_loglkh_multinom_norm(exp_values, sim_values, exp_stds, shots):
    """
    Average likelihood of the experimental values with multinomial distribution.

    Return the likelihood of the experimental values given the simulated
    values, and given a multinomial distribution function that is normalised
    to give probability 1 at the top of the distribution.
    """
    multi = tfp.distributions.Multinomial(
        total_count=tf.reshape(shots, [shots.shape[0]]), probs=sim_values
    )
    loglkhs = multi.log_prob(exp_values * shots) - multi.log_prob(sim_values * shots)
    loglkh = tf.reduce_mean(loglkhs)
    # Removed a stray "- 1.5" offset: with the peak normalised to probability 1
    # a perfect fit must yield 0, and a constant shift contradicts the
    # documented contract (it also adds nothing for optimisation).
    return -loglkh