Merge pull request #12 from stat-ml/horvitz-thompson
Cov leverages
kirill-fedyanin authored Jun 3, 2020
2 parents 9d478c4 + 893a997 commit 1fe46f8
Showing 3 changed files with 20 additions and 15 deletions.
2 changes: 1 addition & 1 deletion alpaca/model/ensemble.py
@@ -33,7 +33,7 @@ def eval(self):
     def __call__(self, x, reduction='default', **kwargs):
         if 'dropout_mask' in kwargs and isinstance(kwargs['dropout_mask'], list):
             masks = kwargs.pop('dropout_mask')
-            res = torch.stack([m(x, dropout_mask = dpm, **kwargs) for m, dpm in zip(self.models, masks)])
+            res = torch.stack([m(x, dropout_mask=dpm, **kwargs) for m, dpm in zip(self.models, masks)])
         else:
             res = torch.stack([m(x, **kwargs) for m in self.models])

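The one-character fix above is cosmetic (PEP 8 spacing around the keyword argument), but the branch it touches is worth a note: when dropout_mask arrives as a list, each ensemble member is paired with its own mask via zip. A minimal sketch of that pairing, where ToyModel is a hypothetical stand-in for alpaca's models rather than part of its API:

# Editor's sketch of the per-model mask pairing; ToyModel is hypothetical.
import torch

class ToyModel:
    def __call__(self, x, dropout_mask=None):
        out = x.sum(dim=-1, keepdim=True)
        return out if dropout_mask is None else out * dropout_mask

models = [ToyModel(), ToyModel()]
x = torch.ones(4, 3)
masks = [torch.ones(4, 1), torch.zeros(4, 1)]  # one mask per member

# Same zip as in __call__ above: model i receives mask i.
res = torch.stack([m(x, dropout_mask=dpm) for m, dpm in zip(models, masks)])
print(res.shape)  # torch.Size([2, 4, 1]) -> (n_models, batch, out_dim)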
20 changes: 10 additions & 10 deletions alpaca/uncertainty_estimator/bald.py
@@ -79,10 +79,10 @@ def _aquisition(self, mcd_runs):
             return ue
         elif self.acquisition == 'bald':
             print('bald')
-            return _bald(mcd_runs)
+            return bald(mcd_runs)
         elif self.acquisition == 'bald_normed':
             print('normed bald')
-            return _bald_normed(mcd_runs)
+            return bald_normed(mcd_runs)
         else:
             raise ValueError

@@ -109,26 +109,26 @@ def estimate(self, x_pool, *args):
         with torch.no_grad():
             logits = np.array(self.ensemble(x_pool, reduction=None).cpu())

-        return _bald(np.swapaxes(logits, 0, 1))
+        return bald(np.swapaxes(logits, 0, 1))


-def _entropy(x):
+def entropy(x):
     return np.sum(-x*np.log(np.clip(x, 1e-8, 1)), axis=-1)


-def _bald(logits):
+def bald(logits):
     predictions = softmax(logits, axis=-1)

-    predictive_entropy = _entropy(np.mean(predictions, axis=1))
-    expected_entropy = np.mean(_entropy(predictions), axis=1)
+    predictive_entropy = entropy(np.mean(predictions, axis=1))
+    expected_entropy = np.mean(entropy(predictions), axis=1)

     return predictive_entropy - expected_entropy


-def _bald_normed(logits):
+def bald_normed(logits):
     predictions = softmax(logits, axis=-1)

-    predictive_entropy = _entropy(np.mean(predictions, axis=1))
-    expected_entropy = np.mean(_entropy(predictions), axis=1)
+    predictive_entropy = entropy(np.mean(predictions, axis=1))
+    expected_entropy = np.mean(entropy(predictions), axis=1)

     return (predictive_entropy - expected_entropy) / predictive_entropy
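The renames above (dropping the leading underscores) do not change the math: BALD is the entropy of the run-averaged prediction minus the average entropy of the individual runs, so identical MC runs score about zero and disagreeing runs score high. A quick sanity check under the shape convention estimate() produces after swapaxes, (n_samples, n_mcd_runs, n_classes); the import path is an assumption based on this file's location:

# Sanity check of bald() from this module; data is synthetic.
import numpy as np
from alpaca.uncertainty_estimator.bald import bald  # path assumed from the diff

rng = np.random.default_rng(0)
agree = np.tile([5.0, 0.0], (1, 10, 1))           # 10 identical MC runs
disagree = rng.normal(0.0, 5.0, size=(1, 10, 2))  # 10 conflicting MC runs
logits = np.concatenate([agree, disagree])        # shape (2, 10, 2)

print(bald(logits))  # ~0 for the agreeing sample, clearly positive for the other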
13 changes: 9 additions & 4 deletions alpaca/uncertainty_estimator/masks.py
@@ -8,7 +8,7 @@
 from dppy.finite_dpps import FiniteDPP


-DEFAULT_MASKS = ['mc_dropout', 'ht_decorrelating', 'ht_dpp', 'ht_k_dpp', 'ht_leverages']
+DEFAULT_MASKS = ['mc_dropout', 'ht_decorrelating', 'ht_dpp', 'ht_k_dpp', 'ht_leverages', 'cov_leverages']


 # It's better to use this function to get the mask then call them directly
@@ -26,7 +26,8 @@ def build_masks(names=None, **kwargs):
         'ht_k_dpp': KDPPMask(ht_norm=True),
         'cov_dpp': DPPMask(ht_norm=True, covariance=True),
         'cov_k_dpp': KDPPMask(ht_norm=True, covariance=True),
-        'ht_leverages': LeverageScoreMask(ht_norm=True, lambda_=1)
+        'ht_leverages': LeverageScoreMask(ht_norm=True, lambda_=1),
+        'cov_leverages': LeverageScoreMask(ht_norm=True, lambda_=1, covariance=True),
     }
     if names is None:
         return masks
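With the registry entry in place, the covariance variant is reachable through build_masks like its siblings. A short usage sketch; the import path follows this file's location in the repository:

from alpaca.uncertainty_estimator.masks import DEFAULT_MASKS, build_masks

masks = build_masks()  # names=None returns the full registry above
print('cov_leverages' in masks)          # True
print('cov_leverages' in DEFAULT_MASKS)  # True after this change
mask = masks['cov_leverages']            # LeverageScoreMask(..., covariance=True)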
@@ -115,12 +116,13 @@ def reset(self):


 class LeverageScoreMask:
-    def __init__(self, dry_run=True, ht_norm=True, lambda_=1):
+    def __init__(self, dry_run=True, ht_norm=True, lambda_=1, covariance=False):
         self.layer_correlations = {}
         self.dry_run = dry_run
         self.ht_norm = ht_norm
         self.norm = {}
         self.lambda_ = lambda_
+        self.covariance = covariance

     def __call__(self, x, dropout_rate=0.5, layer_num=0):
         mask_len = x.shape[-1]
@@ -129,7 +131,10 @@ def __call__(self, x, dropout_rate=0.5, layer_num=0):
         if layer_num not in self.layer_correlations:
             x_matrix = x.cpu().numpy()
             self.x_matrix = x_matrix
-            K = np.corrcoef(x_matrix.T)
+            if self.covariance:
+                K = np.cov(x_matrix.T)
+            else:
+                K = np.corrcoef(x_matrix.T)
             I = np.eye(len(K))
             leverages_matrix = np.dot(K, np.linalg.inv(K+self.lambda_*I))
             probabilities = np.diagonal(leverages_matrix)
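The new branch swaps the correlation matrix for the raw covariance before computing ridge leverage scores, diag(K(K + lambda*I)^(-1)), whose entries lie in [0, 1) for any positive semi-definite K. A standalone numpy sketch of just that computation; the random activations and the final Bernoulli sampling step are illustrative assumptions, since the real mask also caches per-layer results, respects dropout_rate, and applies Horvitz-Thompson normalization:

# Standalone sketch of the leverage-score computation in the hunk above.
import numpy as np

rng = np.random.default_rng(0)
x_matrix = rng.normal(size=(128, 16))  # (batch, units) layer activations
lambda_, covariance = 1.0, True

K = np.cov(x_matrix.T) if covariance else np.corrcoef(x_matrix.T)
I = np.eye(len(K))
leverages = np.diagonal(K @ np.linalg.inv(K + lambda_ * I))

# Treating scores as per-unit keep probabilities (illustrative only):
mask = (rng.random(len(leverages)) < leverages).astype(float)
print(leverages.min(), leverages.max())  # all scores within [0, 1)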
