Fix error in matching by resetting index after sampling + test matching (#47)

* Fix error in matching by resetting index after sampling + test matching

* Refactor interface test
kirilklein authored Oct 19, 2024
1 parent fc2445a commit 887c4f5
Showing 2 changed files with 55 additions and 81 deletions.
CausalEstimate/core/effect_computation.py: 2 changes (1 addition & 1 deletion)
@@ -100,7 +100,7 @@ def compute_bootstrap_effects(
             ps_col,
             treatment_col,
             common_support_threshold,
-        )
+        ).reset_index(drop=True)
         # log_sample_stats(sample, treatment_col, outcome_col, ps_col)
         compute_effects_for_sample(
             estimators=estimators,
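Context for the one-line fix above: pandas' DataFrame.sample(..., replace=True) keeps the original index labels, so a bootstrap sample drawn with replacement almost always carries duplicate labels; if downstream matching looks rows up by index label, as the commit title suggests, those duplicates multiply or misalign matches. A minimal standalone sketch of the failure mode and the fix (illustrative data, not the library's code):

    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(0)
    df = pd.DataFrame({"treatment": rng.binomial(1, 0.5, 8),
                       "ps": rng.uniform(size=8)})

    # Sampling with replacement keeps the original labels, so the
    # resampled frame's index is almost never unique.
    boot = df.sample(n=len(df), replace=True, random_state=0)
    print(boot.index.is_unique)  # False for most draws

    # A label-based lookup now returns every duplicated row at once:
    # boot.loc[some_label] can be a DataFrame instead of a single Series,
    # which breaks code expecting one row per matched unit.

    # The fix applied above: give the sample a fresh RangeIndex.
    boot = boot.reset_index(drop=True)
    assert boot.index.is_unique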
tests/test_interface/test_interface.py: 134 changes (54 additions & 80 deletions)
@@ -44,29 +44,27 @@ def setUpClass(cls):
                 "predicted_outcome_control": outcome_control_probability,
             }
         )
 
-    def test_compute_effect_no_bootstrap(self):
-        estimator = Estimator(methods=["AIPW", "TMLE"], effect_type="ATE")
-        # Define estimator-specific arguments
-        method_args = {
+        cls.method_args = {
             "AIPW": {
                 "predicted_outcome_treated_col": "predicted_outcome_treated",
                 "predicted_outcome_control_col": "predicted_outcome_control",
                 "predicted_outcome_col": "predicted_outcome",
             },
             "TMLE": {
                 "predicted_outcome_treated_col": "predicted_outcome_treated",
                 "predicted_outcome_control_col": "predicted_outcome_control",
                 "predicted_outcome_col": "predicted_outcome",
                 # Add TMLE-specific arguments if necessary
             },
         }
 
+    def test_compute_effect_no_bootstrap(self):
+        estimator = Estimator(methods=["AIPW", "TMLE"], effect_type="ATE")
+        # Define estimator-specific arguments
         results = estimator.compute_effect(
             self.sample_data,
             treatment_col="treatment",
             outcome_col="outcome",
             ps_col="propensity_score",
-            method_args=method_args,
+            method_args=self.method_args,
         )
 
         # Check that results are returned for all specified methods
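The hunk above hoists the shared method_args dict out of the individual tests and into setUpClass, which unittest runs once per test class; self.method_args in each test then resolves to the class attribute. A generic sketch of the pattern (not project code):

    import unittest

    class SharedFixtureExample(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            # Executed once for the whole class, before any test method.
            cls.method_args = {"AIPW": {"predicted_outcome_col": "predicted_outcome"}}

        def test_reads_class_fixture(self):
            # Attribute lookup on self falls back to the class attribute.
            self.assertIn("AIPW", self.method_args)

    if __name__ == "__main__":
        unittest.main()

The usual caveat applies: tests should treat a class-level fixture as read-only, since mutations would leak across test methods.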
@@ -88,26 +86,14 @@ def test_compute_effect_no_bootstrap(self):
     def test_compute_effect_with_bootstrap(self):
         estimator = Estimator(methods=["AIPW", "TMLE"], effect_type="ATE")
         # Define estimator-specific arguments
-        method_args = {
-            "AIPW": {
-                "predicted_outcome_col": "predicted_outcome",
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-            },
-            "TMLE": {
-                "predicted_outcome_col": "predicted_outcome",
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-            },
-        }
         results = estimator.compute_effect(
             self.sample_data,
             treatment_col="treatment",
             outcome_col="outcome",
             ps_col="propensity_score",
             bootstrap=True,
             n_bootstraps=10,
-            method_args=method_args,
+            method_args=self.method_args,
         )
 
         # Check that results are returned for all specified methods
@@ -139,24 +125,13 @@ def test_estimator_specific_params(self):
             methods=["AIPW", "TMLE"], effect_type="ATE", method_params=method_params
         )
         # Define estimator-specific arguments
-        method_args = {
-            "AIPW": {
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-            },
-            "TMLE": {
-                "predicted_outcome_col": "predicted_outcome",
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-                # Add TMLE-specific arguments if necessary
-            },
-        }
 
         results = estimator.compute_effect(
             self.sample_data,
             treatment_col="treatment",
             outcome_col="outcome",
             ps_col="propensity_score",
-            method_args=method_args,
+            method_args=self.method_args,
         )
 
         # Ensure the code runs without errors
@@ -168,20 +143,14 @@ def test_missing_columns(self):
         # Remove the 'treatment' column to simulate missing data
         sample_data_missing = self.sample_data.drop(columns=["treatment"])
         # Define estimator-specific arguments
-        method_args = {
-            "AIPW": {
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-            },
-        }
         with self.assertRaises(ValueError) as context:
             estimator.compute_effect(
                 sample_data_missing,
                 treatment_col="treatment",
                 outcome_col="outcome",
                 ps_col="propensity_score",
                 bootstrap=False,
-                method_args=method_args,
+                method_args=self.method_args,
             )
         self.assertTrue(context.exception)
 
@@ -195,24 +164,13 @@ def test_invalid_method(self):
     def test_estimator_access(self):
         estimator = Estimator(methods=["AIPW", "TMLE"], effect_type="ATE")
         # Define estimator-specific arguments
-        method_args = {
-            "AIPW": {
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-                "predicted_outcome_col": "predicted_outcome",
-            },
-            "TMLE": {
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-                "predicted_outcome_col": "predicted_outcome",
-            },
-        }
 
         estimator.compute_effect(
             self.sample_data,
             treatment_col="treatment",
             outcome_col="outcome",
             ps_col="propensity_score",
-            method_args=method_args,
+            method_args=self.method_args,
         )
 
         # Access the AIPW estimator instance
@@ -226,20 +184,15 @@ def test_estimator_access(self):
     def test_parallel_bootstrapping(self):
         estimator = Estimator(methods=["AIPW"], effect_type="ATE")
         # Define estimator-specific arguments
-        method_args = {
-            "AIPW": {
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-            },
-        }
 
         results = estimator.compute_effect(
             self.sample_data,
             treatment_col="treatment",
             outcome_col="outcome",
             ps_col="propensity_score",
             bootstrap=True,
             n_bootstraps=10,
-            method_args=method_args,
+            method_args=self.method_args,
             # Include n_jobs parameter if your implementation supports parallel processing
         )
 
@@ -255,36 +208,27 @@ def test_input_validation(self):
         sample_data_with_nan = self.sample_data.copy()
         sample_data_with_nan.loc[0, "outcome"] = np.nan
         # Define estimator-specific arguments
-        method_args = {
-            "AIPW": {
-                "ps_col": "propensity_score",
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-            },
-        }
 
         with self.assertRaises(ValueError) as context:
             estimator.compute_effect(
                 sample_data_with_nan,
                 treatment_col="treatment",
                 outcome_col="outcome",
                 ps_col="propensity_score",
-                method_args=method_args,
+                method_args=self.method_args,
             )
         self.assertIsInstance(context.exception, ValueError)
 
     def test_compute_effect_with_additional_columns(self):
         # Assuming IPW requires 'propensity_score' column
         estimator = Estimator(methods=["IPW"], effect_type="ATE")
         # Define estimator-specific arguments
-        method_args = {
-            "IPW": {},
-        }
 
         results = estimator.compute_effect(
             self.sample_data,
             treatment_col="treatment",
             outcome_col="outcome",
             ps_col="propensity_score",
-            method_args=method_args,
         )
         self.assertIn("IPW", results)
         self.assertIsInstance(results["IPW"]["effect"], float)
@@ -303,18 +247,13 @@ def test_compute_effect_without_method_args(self):
     def test_common_support_filtering(self):
         estimator = Estimator(methods=["AIPW"], effect_type="ATE")
         # Define estimator-specific arguments
-        method_args = {
-            "AIPW": {
-                "predicted_outcome_treated_col": "predicted_outcome_treated",
-                "predicted_outcome_control_col": "predicted_outcome_control",
-            },
-        }
 
        results = estimator.compute_effect(
             self.sample_data,
             treatment_col="treatment",
             outcome_col="outcome",
             ps_col="propensity_score",
-            method_args=method_args,
+            method_args=self.method_args,
             apply_common_support=True,
             common_support_threshold=0.01,
         )
@@ -333,6 +272,41 @@ def test_matching(self):
         )
         self.assertIn("MATCHING", results)
 
+    def test_matching_bootstrap(self):
+        df = self.sample_data.copy()
+        df["treatment"] = np.random.binomial(1, 0.1, size=len(df))
+        estimator = Estimator(methods=["MATCHING"], effect_type="ATE")
+        results = estimator.compute_effect(
+            df,
+            treatment_col="treatment",
+            outcome_col="outcome",
+            ps_col="propensity_score",
+            bootstrap=True,
+            n_bootstraps=10,
+        )
+        self.assertIn("MATCHING", results)
+
+    def test_combined_bootstrap(self):
+        df = self.sample_data.copy()
+        df["treatment"] = np.random.binomial(1, 0.1, size=len(df))
+        estimator = Estimator(
+            methods=["AIPW", "MATCHING", "IPW", "TMLE"], effect_type="ATE"
+        )
+
+        results = estimator.compute_effect(
+            df,
+            treatment_col="treatment",
+            outcome_col="outcome",
+            ps_col="propensity_score",
+            bootstrap=True,
+            n_bootstraps=10,
+            method_args=self.method_args,
+        )
+        self.assertIn("AIPW", results)
+        self.assertIn("TMLE", results)
+        self.assertIn("MATCHING", results)
+        self.assertIn("IPW", results)
 
 
 if __name__ == "__main__":
     unittest.main()
