diff --git a/.github/workflows/apo_sim.yml b/.github/workflows/apo_sim.yml index a86186c..31ee1cb 100644 --- a/.github/workflows/apo_sim.yml +++ b/.github/workflows/apo_sim.yml @@ -17,7 +17,8 @@ jobs: strategy: matrix: script: [ - 'scripts/irm/irm_apo_coverage.py', + 'scripts/irm/apo.py', + 'scripts/irm/apos.py', ] steps: @@ -47,20 +48,27 @@ jobs: with: ref: ${{ env.TARGET_BRANCH }} + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.7.8" + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.12' + python-version-file: "monte-cover/pyproject.toml" - - name: Install dependencies + - name: Install Monte-Cover run: | - python -m pip install --upgrade pip - pip install -r requirements.txt + cd monte-cover + uv venv + uv sync - name: Install DoubleML from correct branch run: | - pip uninstall -y doubleml - pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}" + source monte-cover/.venv/bin/activate + uv pip uninstall doubleml + uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}" - name: Set up Git configuration run: | @@ -68,7 +76,9 @@ jobs: git config --global user.email 'github-actions@github.com' - name: Run scripts - run: python ${{ matrix.script }} + run: | + source monte-cover/.venv/bin/activate + uv run ${{ matrix.script }} - name: Commit any existing changes run: | diff --git a/.github/workflows/irm_sim.yml b/.github/workflows/irm_sim.yml index dcd0d4c..5d26a1b 100644 --- a/.github/workflows/irm_sim.yml +++ b/.github/workflows/irm_sim.yml @@ -17,10 +17,10 @@ jobs: strategy: matrix: script: [ - 'scripts/irm/irm_ate_coverage.py', - 'scripts/irm/irm_atte_coverage.py', - 'scripts/irm/irm_cate_coverage.py', - 'scripts/irm/irm_gate_coverage.py', + 'scripts/irm/irm_ate.py', + 'scripts/irm/irm_atte.py', + 'scripts/irm/irm_cate.py', + 'scripts/irm/irm_gate.py', 'scripts/irm/irm_ate_sensitivity.py', 'scripts/irm/irm_atte_sensitivity.py', ] @@ -52,20 +52,27 @@ jobs: with: ref: ${{ env.TARGET_BRANCH }} + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.7.8" + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.12' + python-version-file: "monte-cover/pyproject.toml" - - name: Install dependencies + - name: Install Monte-Cover run: | - python -m pip install --upgrade pip - pip install -r requirements.txt + cd monte-cover + uv venv + uv sync - name: Install DoubleML from correct branch run: | - pip uninstall -y doubleml - pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}" + source monte-cover/.venv/bin/activate + uv pip uninstall doubleml + uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}" - name: Set up Git configuration run: | @@ -73,7 +80,9 @@ jobs: git config --global user.email 'github-actions@github.com' - name: Run scripts - run: python ${{ matrix.script }} + run: | + source monte-cover/.venv/bin/activate + uv run ${{ matrix.script }} - name: Commit any existing changes run: | diff --git a/.github/workflows/quant_sim.yml b/.github/workflows/quant_sim.yml index a4d1ad5..8304a93 100644 --- a/.github/workflows/quant_sim.yml +++ b/.github/workflows/quant_sim.yml @@ -17,9 +17,9 @@ jobs: strategy: matrix: script: [ - 'scripts/irm/cvar_coverage.py', - 'scripts/irm/pq_coverage.py', - 'scripts/irm/lpq_coverage.py', + 'scripts/irm/cvar.py', + 'scripts/irm/pq.py', + 'scripts/irm/lpq.py', ] steps: @@ -49,20 +49,27 @@ jobs: with: ref: 
${{ env.TARGET_BRANCH }} + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "0.7.8" + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.12' + python-version-file: "monte-cover/pyproject.toml" - - name: Install dependencies + - name: Install Monte-Cover run: | - python -m pip install --upgrade pip - pip install -r requirements.txt + cd monte-cover + uv venv + uv sync - name: Install DoubleML from correct branch run: | - pip uninstall -y doubleml - pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}" + source monte-cover/.venv/bin/activate + uv pip uninstall doubleml + uv pip install "doubleml @ git+https://github.com/DoubleML/doubleml-for-py@${{ env.DML_BRANCH }}" - name: Set up Git configuration run: | @@ -70,7 +77,9 @@ jobs: git config --global user.email 'github-actions@github.com' - name: Run scripts - run: python ${{ matrix.script }} + run: | + source monte-cover/.venv/bin/activate + uv run ${{ matrix.script }} - name: Commit any existing changes run: | diff --git a/.github/workflows/ssm_sim.yml b/.github/workflows/ssm_sim.yml index ae59bca..cdef61c 100644 --- a/.github/workflows/ssm_sim.yml +++ b/.github/workflows/ssm_sim.yml @@ -17,8 +17,8 @@ jobs: strategy: matrix: script: [ - 'scripts/irm/ssm_mar_ate_coverage.py', - 'scripts/irm/ssm_nonignorable_ate_coverage.py', + 'scripts/ssm/ssm_mar_ate.py', + 'scripts/ssm/ssm_nonig_ate.py', ] steps: @@ -73,7 +73,7 @@ jobs: - name: Commit any existing changes run: | - git add results/irm + git add results/ssm git commit -m "Update results from script: ${{ matrix.script }}" || echo "No changed results to commit" - name: Wait random time diff --git a/doc/irm/apo.qmd b/doc/irm/apo.qmd index 3b8607a..376f083 100644 --- a/doc/irm/apo.qmd +++ b/doc/irm/apo.qmd @@ -30,7 +30,7 @@ The simulations are based on the the [make_irm_data_discrete_treatments](https: ```{python} #| echo: false -metadata_file = '../../results/irm/irm_apo_coverage_metadata.csv' +metadata_file = '../../results/irm/apo_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -41,7 +41,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data -df_apo = pd.read_csv("../../results/irm/irm_apo_coverage_apo.csv", index_col=None) +df_apo = pd.read_csv("../../results/irm/apo_coverage.csv", index_col=None) assert df_apo["repetition"].nunique() == 1 n_rep_apo = df_apo["repetition"].unique()[0] @@ -88,7 +88,7 @@ The non-uniform results (coverage, ci length and bias) refer to averaged values ```{python} #| echo: false -metadata_file = '../../results/irm/irm_apo_coverage_metadata.csv' +metadata_file = '../../results/irm/apos_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -99,7 +99,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data -df_apos = pd.read_csv("../../results/irm/irm_apo_coverage_apos.csv", index_col=None) +df_apos = pd.read_csv("../../results/irm/apos_coverage.csv", index_col=None) assert df_apos["repetition"].nunique() == 1 n_rep_apos = df_apos["repetition"].unique()[0] @@ -144,7 +144,7 @@ The non-uniform results (coverage, ci length and bias) refer to averaged values ```{python} #| echo: false -metadata_file = '../../results/irm/irm_apo_coverage_metadata.csv' +metadata_file = '../../results/irm/apos_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -155,7 +155,7 @@ 
print(metadata_df.T.to_string(header=False)) #| echo: false # set up data -df_contrast = pd.read_csv("../../results/irm/irm_apo_coverage_apos_contrast.csv", index_col=None) +df_contrast = pd.read_csv("../../results/irm/apos_causal_contrast.csv", index_col=None) assert df_contrast["repetition"].nunique() == 1 n_rep_contrast = df_contrast["repetition"].unique()[0] diff --git a/doc/irm/irm.qmd b/doc/irm/irm.qmd index 01fae21..f30fe75 100644 --- a/doc/irm/irm.qmd +++ b/doc/irm/irm.qmd @@ -86,7 +86,7 @@ As for the ATE, the simulations are based on the the [make_irm_data](https://do ```{python} #| echo: false -metadata_file = '../../results/irm/irm_atte_coverage_metadata.csv' +metadata_file = '../../results/irm/irm_atte_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -135,7 +135,7 @@ generate_and_show_styled_table( ## Sensitivity -The simulations are based on the the ADD-DGP with $10,000$ observations. As the DGP is nonlinear, we will only use corresponding learners. Since the DGP includes an unobserved confounder, we would expect a bias in the ATE estimates, leading to low coverage of the true parameter. +The simulations are based on the [make_confounded_irm_data](https://docs.doubleml.org/stable/api/generated/doubleml.datasets.make_confounded_irm_data.html#doubleml.datasets.make_confounded_irm_data)-DGP with $5,000$ observations. Since the DGP includes an unobserved confounder, we would expect a bias in the ATE estimates, leading to low coverage of the true parameter. The confounding is set such that both sensitivity parameters are approximately $cf_y=cf_d=0.1$, such that the robustness value $RV$ should be approximately $10\%$. Further, the corresponding confidence intervals are one-sided (since the direction of the bias is unknown), such that only one side should approximate the corresponding coverage level (here only the lower coverage is relevant since the bias is positive). Remark that for the coverage level the value of $\rho$ has to be correctly specified, such that the coverage level will be generally (significantly) larger than the nominal level under the conservative choice of $|\rho|=1$.
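+
+For illustration, the per-repetition sensitivity check in the simulation scripts looks roughly like the following sketch (here `dml_model` denotes an already fitted `DoubleMLIRM` object and `theta` the true effect; both live in the simulation code, not in this document):
+
+```{python}
+#| eval: false
+# run the sensitivity analysis at the (approximate) oracle confounding strengths
+dml_model.sensitivity_analysis(cf_y=0.1, cf_d=0.1, rho=1.0, level=0.95, null_hypothesis=theta)
+sens = dml_model.sensitivity_params
+covered_lower = theta >= sens["ci"]["lower"][0]  # one-sided lower coverage
+rv, rva = sens["rv"][0], sens["rva"][0]  # robustness values
+```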
@@ -157,7 +157,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data and rename columns -df_ate_sens = pd.read_csv("../../results/irm/irm_ate_sensitivity.csv", index_col=None) +df_ate_sens = pd.read_csv("../../results/irm/irm_ate_sensitivity_coverage.csv", index_col=None) assert df_ate_sens["repetition"].nunique() == 1 n_rep_ate_sens = df_ate_sens["repetition"].unique()[0] @@ -211,7 +211,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data -df_atte_sens = pd.read_csv("../../results/irm/irm_atte_sensitivity.csv", index_col=None) +df_atte_sens = pd.read_csv("../../results/irm/irm_atte_sensitivity_coverage.csv", index_col=None) assert df_atte_sens["repetition"].nunique() == 1 n_rep_atte_sens = df_atte_sens["repetition"].unique()[0] diff --git a/doc/irm/qte.qmd b/doc/irm/qte.qmd index afce285..4b60ccc 100644 --- a/doc/irm/qte.qmd +++ b/doc/irm/qte.qmd @@ -31,7 +31,7 @@ The non-uniform results (coverage, ci length and bias) refer to averaged values ```{python} #| echo: false -metadata_file = '../../results/irm/pq_coverage_metadata.csv' +metadata_file = '../../results/irm/pq_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -42,7 +42,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data -df_qte = pd.read_csv("../../results/irm/pq_coverage_qte.csv", index_col=None) +df_qte = pd.read_csv("../../results/irm/pq_effect_coverage.csv", index_col=None) assert df_qte["repetition"].nunique() == 1 n_rep_qte = df_qte["repetition"].unique()[0] @@ -85,7 +85,7 @@ generate_and_show_styled_table( #| echo: false # set up data -df_pq0 = pd.read_csv("../../results/irm/pq_coverage_pq0.csv", index_col=None) +df_pq0 = pd.read_csv("../../results/irm/pq_Y0_coverage.csv", index_col=None) assert df_pq0["repetition"].nunique() == 1 n_rep_pq0 = df_pq0["repetition"].unique()[0] @@ -125,7 +125,7 @@ generate_and_show_styled_table( #| echo: false # set up data and rename columns -df_pq1 = pd.read_csv("../../results/irm/pq_coverage_pq1.csv", index_col=None) +df_pq1 = pd.read_csv("../../results/irm/pq_Y1_coverage.csv", index_col=None) assert df_pq1["repetition"].nunique() == 1 n_rep_pq1 = df_pq1["repetition"].unique()[0] @@ -161,7 +161,7 @@ generate_and_show_styled_table( ## LQTE -The results are based on a location-scale model as described the corresponding [Example](https://docs.doubleml.org/stable/examples/py_double_ml_pq.html#Local-Potential-Quantiles-(LPQs)) with $10,000$ observations. +The results are based on a location-scale model as described in the corresponding [Example](https://docs.doubleml.org/stable/examples/py_double_ml_pq.html#Local-Potential-Quantiles-(LPQs)) with $5,000$ observations. The non-uniform results (coverage, ci length and bias) refer to averaged values over all quantiles (point-wise confidende intervals).
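+
+For reference, a minimal setup sketch (assumptions: `dml_data` is a `DoubleMLData` object that carries the binary instrument, and `ml_g`, `ml_m` are classification learners, mirroring the simulation code):
+
+```{python}
+#| eval: false
+import numpy as np
+import doubleml as dml
+
+tau_vec = np.arange(0.1, 1.0, 0.1)  # quantile grid
+dml_model = dml.DoubleMLQTE(obj_dml_data=dml_data, ml_g=ml_g, ml_m=ml_m, score="LPQ", quantiles=tau_vec)
+dml_model.fit()
+dml_model.bootstrap(n_rep_boot=2000)  # required for joint confidence intervals
+```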
@@ -169,7 +169,7 @@ The non-uniform results (coverage, ci length and bias) refer to averaged values ```{python} #| echo: false -metadata_file = '../../results/irm/lpq_coverage_metadata.csv' +metadata_file = '../../results/irm/lpq_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -180,7 +180,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data -df_lqte = pd.read_csv("../../results/irm/lpq_coverage_lqte.csv", index_col=None) +df_lqte = pd.read_csv("../../results/irm/lpq_effect_coverage.csv", index_col=None) assert df_lqte["repetition"].nunique() == 1 n_rep_lqte = df_lqte["repetition"].unique()[0] @@ -222,7 +222,7 @@ generate_and_show_styled_table( #| echo: false # set up data -df_lpq0 = pd.read_csv("../../results/irm/lpq_coverage_lpq0.csv", index_col=None) +df_lpq0 = pd.read_csv("../../results/irm/lpq_Y0_coverage.csv", index_col=None) assert df_lpq0["repetition"].nunique() == 1 n_rep_lpq0 = df_lpq0["repetition"].unique()[0] @@ -262,7 +262,7 @@ generate_and_show_styled_table( #| echo: false # set up data -df_lpq1 = pd.read_csv("../../results/irm/lpq_coverage_lpq1.csv", index_col=None) +df_lpq1 = pd.read_csv("../../results/irm/lpq_Y1_coverage.csv", index_col=None) assert df_lpq1["repetition"].nunique() == 1 n_rep_lpq1 = df_lpq1["repetition"].unique()[0] @@ -306,7 +306,7 @@ The non-uniform results (coverage, ci length and bias) refer to averaged values ```{python} #| echo: false -metadata_file = '../../results/irm/cvar_coverage_metadata.csv' +metadata_file = '../../results/irm/cvar_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -317,7 +317,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data -df_cvar_qte = pd.read_csv("../../results/irm/cvar_coverage_qte.csv", index_col=None) +df_cvar_qte = pd.read_csv("../../results/irm/cvar_effect_coverage.csv", index_col=None) assert df_cvar_qte["repetition"].nunique() == 1 n_rep_cvar_qte = df_cvar_qte["repetition"].unique()[0] @@ -359,7 +359,7 @@ generate_and_show_styled_table( #| echo: false # set up data -df_cvar_pq0 = pd.read_csv("../../results/irm/cvar_coverage_pq0.csv", index_col=None) +df_cvar_pq0 = pd.read_csv("../../results/irm/cvar_Y0_coverage.csv", index_col=None) assert df_cvar_pq0["repetition"].nunique() == 1 n_rep_cvar_pq0 = df_cvar_pq0["repetition"].unique()[0] @@ -399,7 +399,7 @@ generate_and_show_styled_table( #| echo: false # set up data -df_cvar_pq1 = pd.read_csv("../../results/irm/cvar_coverage_pq1.csv", index_col=None) +df_cvar_pq1 = pd.read_csv("../../results/irm/cvar_Y1_coverage.csv", index_col=None) assert df_cvar_pq1["repetition"].nunique() == 1 n_rep_cvar_pq1 = df_cvar_pq1["repetition"].unique()[0] diff --git a/doc/ssm/ssm_mar.qmd b/doc/ssm/ssm_mar.qmd index 65a1871..a396fa4 100644 --- a/doc/ssm/ssm_mar.qmd +++ b/doc/ssm/ssm_mar.qmd @@ -30,7 +30,7 @@ The simulations are based on the [make_ssm_data](https://docs.doubleml.org/stabl ```{python} #| echo: false -metadata_file = '../../results/irm/ssm_mar_ate_coverage_metadata.csv' +metadata_file = '../../results/ssm/ssm_mar_ate_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -42,7 +42,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data and rename columns -df = pd.read_csv("../../results/irm/ssm_mar_ate_coverage.csv", index_col=None) +df = pd.read_csv("../../results/ssm/ssm_mar_ate_coverage.csv", index_col=None) assert 
df["repetition"].nunique() == 1 n_rep = df["repetition"].unique()[0] diff --git a/doc/ssm/ssm_nonignorable.qmd b/doc/ssm/ssm_nonignorable.qmd index f0a807d..8eff76b 100644 --- a/doc/ssm/ssm_nonignorable.qmd +++ b/doc/ssm/ssm_nonignorable.qmd @@ -31,7 +31,7 @@ The simulations are based on the [make_ssm_data](https://docs.doubleml.org/stabl ```{python} #| echo: false #| collapse: true -metadata_file = '../../results/irm/ssm_nonignorable_ate_coverage_metadata.csv' +metadata_file = '../../results/ssm/ssm_nonig_ate_metadata.csv' metadata_df = pd.read_csv(metadata_file) print(metadata_df.T.to_string(header=False)) ``` @@ -42,7 +42,7 @@ print(metadata_df.T.to_string(header=False)) #| echo: false # set up data and rename columns -df = pd.read_csv("../../results/irm/ssm_nonignorable_ate_coverage.csv", index_col=None) +df = pd.read_csv("../../results/ssm/ssm_nonig_ate_coverage.csv", index_col=None) assert df["repetition"].nunique() == 1 n_rep = df["repetition"].unique()[0] diff --git a/monte-cover/src/montecover/irm/__init__.py b/monte-cover/src/montecover/irm/__init__.py new file mode 100644 index 0000000..57050ae --- /dev/null +++ b/monte-cover/src/montecover/irm/__init__.py @@ -0,0 +1,27 @@ +"""Monte Carlo coverage simulations for IRM.""" + +from montecover.irm.apo import APOCoverageSimulation +from montecover.irm.apos import APOSCoverageSimulation +from montecover.irm.cvar import CVARCoverageSimulation +from montecover.irm.irm_ate import IRMATECoverageSimulation +from montecover.irm.irm_ate_sensitivity import IRMATESensitivityCoverageSimulation +from montecover.irm.irm_atte import IRMATTECoverageSimulation +from montecover.irm.irm_atte_sensitivity import IRMATTESensitivityCoverageSimulation +from montecover.irm.irm_cate import IRMCATECoverageSimulation +from montecover.irm.irm_gate import IRMGATECoverageSimulation +from montecover.irm.lpq import LPQCoverageSimulation +from montecover.irm.pq import PQCoverageSimulation + +__all__ = [ + "APOCoverageSimulation", + "APOSCoverageSimulation", + "CVARCoverageSimulation", + "IRMATECoverageSimulation", + "IRMATESensitivityCoverageSimulation", + "IRMATTECoverageSimulation", + "IRMATTESensitivityCoverageSimulation", + "IRMCATECoverageSimulation", + "IRMGATECoverageSimulation", + "LPQCoverageSimulation", + "PQCoverageSimulation", +] diff --git a/monte-cover/src/montecover/irm/apo.py b/monte-cover/src/montecover/irm/apo.py new file mode 100644 index 0000000..b887b7d --- /dev/null +++ b/monte-cover/src/montecover/irm/apo.py @@ -0,0 +1,152 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +import pandas as pd +from doubleml.datasets import make_irm_data_discrete_treatments + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class APOCoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLAPO for APO estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner
in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + n_levels = self.dgp_parameters["n_levels"][0] + data_apo_oracle = make_irm_data_discrete_treatments( + n_obs=int(1e6), n_levels=n_levels, linear=self.dgp_parameters["linear"][0] + ) + + y0 = data_apo_oracle["oracle_values"]["y0"] + ite = data_apo_oracle["oracle_values"]["ite"] + d = data_apo_oracle["d"] + + average_ites = np.full(n_levels + 1, np.nan) + apos = np.full(n_levels + 1, np.nan) + for i in range(n_levels + 1): + average_ites[i] = np.mean(ite[d == i]) * (i > 0) + apos[i] = np.mean(y0) + average_ites[i] + + ates = np.full(n_levels, np.nan) + for i in range(n_levels): + ates[i] = apos[i + 1] - apos[0] + + self.logger.info(f"Levels and their counts:\n{np.unique(d, return_counts=True)}") + self.logger.info(f"True APOs: {apos}") + self.logger.info(f"True ATEs: {ates}") + + self.oracle_values = dict() + self.oracle_values["apos"] = apos + self.oracle_values["ates"] = ates + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + treatment_level = dml_params["treatment_level"] + trimming_threshold = dml_params["trimming_threshold"] + + # Model + dml_model = dml.DoubleMLAPO( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + treatment_level=treatment_level, + trimming_threshold=trimming_threshold, + ) + dml_model.fit() + + result = { + "coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=self.oracle_values["apos"][treatment_level], + confint=dml_model.confint(level=level), + joint_confint=None, + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "Treatment Level": treatment_level, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "Treatment Level", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + data = make_irm_data_discrete_treatments( + n_obs=dgp_params["n_obs"], + n_levels=dgp_params["n_levels"], + linear=dgp_params["linear"], + ) + df_apo = pd.DataFrame( + np.column_stack((data["y"], data["d"], data["x"])), + columns=["y", "d"] + ["x" + str(i) for i in 
range(data["x"].shape[1])], + ) + dml_data = dml.DoubleMLData(df_apo, "y", "d") + return dml_data diff --git a/monte-cover/src/montecover/irm/apos.py b/monte-cover/src/montecover/irm/apos.py new file mode 100644 index 0000000..4b19deb --- /dev/null +++ b/monte-cover/src/montecover/irm/apos.py @@ -0,0 +1,164 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +import pandas as pd +from doubleml.datasets import make_irm_data_discrete_treatments + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class APOSCoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLAPOS for APO estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + n_levels = self.dgp_parameters["n_levels"][0] + data_apo_oracle = make_irm_data_discrete_treatments( + n_obs=int(1e6), n_levels=n_levels, linear=self.dgp_parameters["linear"][0] + ) + + y0 = data_apo_oracle["oracle_values"]["y0"] + ite = data_apo_oracle["oracle_values"]["ite"] + d = data_apo_oracle["d"] + + average_ites = np.full(n_levels + 1, np.nan) + apos = np.full(n_levels + 1, np.nan) + for i in range(n_levels + 1): + average_ites[i] = np.mean(ite[d == i]) * (i > 0) + apos[i] = np.mean(y0) + average_ites[i] + + ates = np.full(n_levels, np.nan) + for i in range(n_levels): + ates[i] = apos[i + 1] - apos[0] + + self.logger.info(f"Levels and their counts:\n{np.unique(d, return_counts=True)}") + self.logger.info(f"True APOs: {apos}") + self.logger.info(f"True ATEs: {ates}") + + self.oracle_values = dict() + self.oracle_values["apos"] = apos + self.oracle_values["ates"] = ates + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + treatment_levels = dml_params["treatment_levels"] + trimming_threshold = dml_params["trimming_threshold"] + + # Model + dml_model = dml.DoubleMLAPOS( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + treatment_levels=treatment_levels, + trimming_threshold=trimming_threshold, + ) + dml_model.fit() + dml_model.bootstrap(n_rep_boot=2000) + + causal_contrast_model = dml_model.causal_contrast(reference_levels=0) + causal_contrast_model.bootstrap(n_rep_boot=2000) + + result = { + "coverage": [], + "causal_contrast": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["coverage"] = self._compute_coverage( +
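# pointwise CIs at the current level; the joint CIs reuse the bootstrap draws generated above +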
thetas=dml_model.coef, + oracle_thetas=self.oracle_values["apos"], + confint=dml_model.confint(level=level), + joint_confint=dml_model.confint(level=level, joint=True), + ) + level_result["causal_contrast"] = self._compute_coverage( + thetas=causal_contrast_model.thetas, + oracle_thetas=self.oracle_values["ates"], + confint=causal_contrast_model.confint(level=level), + joint_confint=causal_contrast_model.confint(level=level, joint=True), + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Uniform Coverage": "mean", + "Uniform CI Length": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + data = make_irm_data_discrete_treatments( + n_obs=dgp_params["n_obs"], + n_levels=dgp_params["n_levels"], + linear=dgp_params["linear"], + ) + df_apo = pd.DataFrame( + np.column_stack((data["y"], data["d"], data["x"])), + columns=["y", "d"] + ["x" + str(i) for i in range(data["x"].shape[1])], + ) + dml_data = dml.DoubleMLData(df_apo, "y", "d") + return dml_data diff --git a/monte-cover/src/montecover/irm/cvar.py b/monte-cover/src/montecover/irm/cvar.py new file mode 100644 index 0000000..19180c0 --- /dev/null +++ b/monte-cover/src/montecover/irm/cvar.py @@ -0,0 +1,214 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +import pandas as pd + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +# define loc-scale model +def f_loc(D, X): + loc = 0.5 * D + 2 * D * X[:, 4] + 2.0 * (X[:, 1] > 0.1) - 1.7 * (X[:, 0] * X[:, 2] > 0) - 3 * X[:, 3] + return loc + + +def f_scale(D, X): + scale = np.sqrt(0.5 * D + 0.3 * D * X[:, 1] + 2) + return scale + + +def dgp(n=200, p=5): + X = np.random.uniform(-1, 1, size=[n, p]) + D = ((X[:, 1] - X[:, 3] + 1.5 * (X[:, 0] > 0) + np.random.normal(size=n)) > 0) * 1.0 + epsilon = np.random.normal(size=n) + + Y = f_loc(D, X) + f_scale(D, X) * epsilon + return Y, X, D, epsilon + + +class CVARCoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLCVAR for Conditional Value at Risk estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in 
self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + # Parameters + n_true = int(10e6) + tau_vec = self.dml_parameters["tau_vec"][0] + p = self.dgp_parameters["dim_x"][0] + + _, X_true, _, epsilon_true = dgp(n=n_true, p=p) + D1 = np.ones(n_true) + D0 = np.zeros(n_true) + + Y1 = f_loc(D1, X_true) + f_scale(D1, X_true) * epsilon_true + Y0 = f_loc(D0, X_true) + f_scale(D0, X_true) * epsilon_true + + Y1_quant = np.quantile(Y1, q=tau_vec) + Y0_quant = np.quantile(Y0, q=tau_vec) + Y1_cvar = [Y1[Y1 >= quant].mean() for quant in Y1_quant] + Y0_cvar = [Y0[Y0 >= quant].mean() for quant in Y0_quant] + effect_cvar = np.array(Y1_cvar) - np.array(Y0_cvar) + + self.oracle_values = dict() + self.oracle_values["effect_cvar"] = effect_cvar + self.oracle_values["Y1_cvar"] = Y1_cvar + self.oracle_values["Y0_cvar"] = Y0_cvar + + self.logger.info(f"Oracle values: {self.oracle_values}") + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + tau_vec = dml_params["tau_vec"] + trimming_threshold = dml_params["trimming_threshold"] + Y0_cvar = self.oracle_values["Y0_cvar"] + Y1_cvar = self.oracle_values["Y1_cvar"] + effect_cvar = self.oracle_values["effect_cvar"] + + # Model + dml_model = dml.DoubleMLQTE( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + score="CVaR", + quantiles=tau_vec, + trimming_threshold=trimming_threshold, + ) + dml_model.fit() + dml_model.bootstrap(n_rep_boot=2000) + + result = { + "Y0_coverage": [], + "Y1_coverage": [], + "effect_coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["effect_coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=effect_cvar, + confint=dml_model.confint(level=level), + joint_confint=dml_model.confint(level=level, joint=True), + ) + + Y0_estimates = np.full(len(tau_vec), np.nan) + Y1_estimates = np.full(len(tau_vec), np.nan) + + Y0_confint = np.full((len(tau_vec), 2), np.nan) + Y1_confint = np.full((len(tau_vec), 2), np.nan) + + for tau_idx in range(len(tau_vec)): + model_Y0 = dml_model.modellist_0[tau_idx] + model_Y1 = dml_model.modellist_1[tau_idx] + + Y0_estimates[tau_idx] = model_Y0.coef + Y1_estimates[tau_idx] = model_Y1.coef + + Y0_confint[tau_idx, :] = model_Y0.confint(level=level) + Y1_confint[tau_idx, :] = model_Y1.confint(level=level) + + Y0_confint_df = pd.DataFrame(Y0_confint, columns=["lower", "upper"]) + Y1_confint_df = pd.DataFrame(Y1_confint, columns=["lower", "upper"]) + + level_result["Y0_coverage"] = self._compute_coverage( + thetas=Y0_estimates, + oracle_thetas=Y0_cvar, + confint=Y0_confint_df, + joint_confint=None, + ) + + level_result["Y1_coverage"] = self._compute_coverage( + thetas=Y1_estimates, + oracle_thetas=Y1_cvar, + confint=Y1_confint_df, + joint_confint=None, + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + 
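# record the parameter combination; summarize_results() groups results by these columns +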
"Learner m": learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "repetition": "count", + } + + result_summary = dict() + # Aggregate results for Y0 and Y1 + for result_name in ["Y0_coverage", "Y1_coverage"]: + df = self.results[result_name] + result_summary[result_name] = df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + uniform_aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Uniform Coverage": "mean", + "Uniform CI Length": "mean", + "repetition": "count", + } + result_summary["effect_coverage"] = ( + self.results["effect_coverage"].groupby(groupby_cols).agg(uniform_aggregation_dict).reset_index() + ) + self.logger.debug("Summarized effect_coverage results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + Y, X, D, _ = dgp(n=dgp_params["n_obs"], p=dgp_params["dim_x"]) + dml_data = dml.DoubleMLData.from_arrays(X, Y, D) + return dml_data diff --git a/monte-cover/src/montecover/irm/irm_ate.py b/monte-cover/src/montecover/irm/irm_ate.py new file mode 100644 index 0000000..09b3f83 --- /dev/null +++ b/monte-cover/src/montecover/irm/irm_ate.py @@ -0,0 +1,118 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +from doubleml.datasets import make_irm_data + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class IRMATECoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLIRM for ATE estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + self.oracle_values = dict() + self.oracle_values["theta"] = self.dgp_parameters["theta"] + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + + # Model + dml_model = dml.DoubleMLIRM( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + ) + dml_model.fit() + + result = { + "coverage": [], + } + for level 
in self.confidence_parameters["level"]: + level_result = dict() + level_result["coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=self.oracle_values["theta"], + confint=dml_model.confint(level=level), + joint_confint=None, + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + data = make_irm_data( + theta=dgp_params["theta"], + n_obs=dgp_params["n_obs"], + dim_x=dgp_params["dim_x"], + return_type="DataFrame", + ) + dml_data = dml.DoubleMLData(data, "y", "d") + return dml_data diff --git a/monte-cover/src/montecover/irm/irm_ate_sensitivity.py b/monte-cover/src/montecover/irm/irm_ate_sensitivity.py new file mode 100644 index 0000000..09ca004 --- /dev/null +++ b/monte-cover/src/montecover/irm/irm_ate_sensitivity.py @@ -0,0 +1,172 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +import pandas as pd +from doubleml.datasets import make_confounded_irm_data + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class IRMATESensitivityCoverageSimulation(BaseSimulation): + """Simulation class for sensitivity properties of DoubleMLIRM for ATE estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + dgp_dict = make_confounded_irm_data( + n_obs=int(1e6), + theta=self.dgp_parameters["theta"][0], + gamma_a=self.dgp_parameters["gamma_a"][0], + beta_a=self.dgp_parameters["beta_a"][0], + var_epsilon_y=self.dgp_parameters["var_epsilon_y"][0], + trimming_threshold=self.dgp_parameters["trimming_threshold"][0], + linear=self.dgp_parameters["linear"][0], + ) + + self.oracle_values = { + "theta": self.dgp_parameters["theta"], + "cf_y": 
dgp_dict["oracle_values"]["cf_y"], + "cf_d": dgp_dict["oracle_values"]["cf_d_ate"], + "rho": dgp_dict["oracle_values"]["rho_ate"], + } + self.logger.info(f"Oracle values: {self.oracle_values}") + + def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + trimming_threshold = dml_params["trimming_threshold"] + theta = self.oracle_values["theta"][0] + + # Model + dml_model = dml.DoubleMLIRM( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + score="ATE", + trimming_threshold=trimming_threshold, + ) + dml_model.fit() + + result = { + "coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=theta, + confint=dml_model.confint(level=level), + joint_confint=None, + ) + + # sensitivity analysis + dml_model.sensitivity_analysis( + cf_y=self.oracle_values["cf_y"], + cf_d=self.oracle_values["cf_d"], + rho=self.oracle_values["rho"], + level=level, + null_hypothesis=theta, + ) + sensitivity_results = { + "Coverage (Lower)": theta >= dml_model.sensitivity_params["ci"]["lower"][0], + "Coverage (Upper)": theta <= dml_model.sensitivity_params["ci"]["upper"][0], + "RV": dml_model.sensitivity_params["rv"][0], + "RVa": dml_model.sensitivity_params["rva"][0], + "Bias (Lower)": abs(theta - dml_model.sensitivity_params["theta"]["lower"][0]), + "Bias (Upper)": abs(theta - dml_model.sensitivity_params["theta"]["upper"][0]), + } + # add sensitivity results to the level result coverage + level_result["coverage"].update(sensitivity_results) + + # add parameters to the result + for res in level_result.values(): + res.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Coverage (Lower)": "mean", + "Coverage (Upper)": "mean", + "RV": "mean", + "RVa": "mean", + "Bias (Lower)": "mean", + "Bias (Upper)": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData: + """Generate data for the simulation.""" + dgp_dict = make_confounded_irm_data( + n_obs=dgp_params["n_obs"], + theta=dgp_params["theta"], + gamma_a=dgp_params["gamma_a"], + beta_a=dgp_params["beta_a"], + var_epsilon_y=dgp_params["var_epsilon_y"], + trimming_threshold=dgp_params["trimming_threshold"], + linear=dgp_params["linear"], + ) + x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])] + df = pd.DataFrame( + np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])), + columns=x_cols + ["y", "d"], + ) + dml_data = dml.DoubleMLData(df, "y", "d")
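+        # "y" is the outcome, "d" the binary treatment; the X columns serve as observed confounders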
+ return dml_data diff --git a/monte-cover/src/montecover/irm/irm_atte.py b/monte-cover/src/montecover/irm/irm_atte.py new file mode 100644 index 0000000..4dbb449 --- /dev/null +++ b/monte-cover/src/montecover/irm/irm_atte.py @@ -0,0 +1,161 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +from doubleml.datasets import make_irm_data +from scipy.linalg import toeplitz + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class IRMATTECoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLIRM for ATTE estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + theta = self.dgp_parameters["theta"][0] + dim_x = self.dgp_parameters["dim_x"][0] + + n_obs_atte = int(1e6) + R2_d = 0.5 + R2_y = 0.5 + + v = np.random.uniform( + size=[ + n_obs_atte, + ] + ) + zeta = np.random.standard_normal( + size=[ + n_obs_atte, + ] + ) + + cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)]) + x = np.random.multivariate_normal( + np.zeros(dim_x), + cov_mat, + size=[ + n_obs_atte, + ], + ) + + beta = [1 / (k**2) for k in range(1, dim_x + 1)] + b_sigma_b = np.dot(np.dot(cov_mat, beta), beta) + c_y = np.sqrt(R2_y / ((1 - R2_y) * b_sigma_b)) + c_d = np.sqrt(np.pi**2 / 3.0 * R2_d / ((1 - R2_d) * b_sigma_b)) + + xx = np.exp(np.dot(x, np.multiply(beta, c_d))) + d = 1.0 * ((xx / (1 + xx)) > v) + + # y = d * theta + d * np.dot(x, np.multiply(beta, c_y)) + zeta + y0 = zeta + y1 = theta + np.dot(x, np.multiply(beta, c_y)) + zeta + + self.oracle_values = dict() + self.oracle_values["theta"] = np.mean(y1[d == 1] - y0[d == 1]) + self.logger.info(f"Oracle ATTE value: {self.oracle_values['theta']}") + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + + # Model + dml_model = dml.DoubleMLIRM( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + score="ATTE", + ) + dml_model.fit() + + result = { + "coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=self.oracle_values["theta"], + confint=dml_model.confint(level=level), + joint_confint=None, + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": 
learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + data = make_irm_data( + theta=dgp_params["theta"], + n_obs=dgp_params["n_obs"], + dim_x=dgp_params["dim_x"], + return_type="DataFrame", + ) + dml_data = dml.DoubleMLData(data, "y", "d") + return dml_data diff --git a/monte-cover/src/montecover/irm/irm_atte_sensitivity.py b/monte-cover/src/montecover/irm/irm_atte_sensitivity.py new file mode 100644 index 0000000..47ec91f --- /dev/null +++ b/monte-cover/src/montecover/irm/irm_atte_sensitivity.py @@ -0,0 +1,172 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +import pandas as pd +from doubleml.datasets import make_confounded_irm_data + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class IRMATTESensitivityCoverageSimulation(BaseSimulation): + """Simulation class for sensitivity properties of DoubleMLIRM for ATTE estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + dgp_dict = make_confounded_irm_data( + n_obs=int(1e6), + theta=self.dgp_parameters["theta"][0], + gamma_a=self.dgp_parameters["gamma_a"][0], + beta_a=self.dgp_parameters["beta_a"][0], + var_epsilon_y=self.dgp_parameters["var_epsilon_y"][0], + trimming_threshold=self.dgp_parameters["trimming_threshold"][0], + linear=self.dgp_parameters["linear"][0], + ) + + self.oracle_values = { + "theta": self.dgp_parameters["theta"], + "cf_y": dgp_dict["oracle_values"]["cf_y"], + "cf_d": dgp_dict["oracle_values"]["cf_d_atte"], + "rho": dgp_dict["oracle_values"]["rho_atte"], + } + self.logger.info(f"Oracle values: {self.oracle_values}") + + def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g =
create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + trimming_threshold = dml_params["trimming_threshold"] + theta = self.oracle_values["theta"][0] + + # Model + dml_model = dml.DoubleMLIRM( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + score="ATTE", + trimming_threshold=trimming_threshold, + ) + dml_model.fit() + + result = { + "coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=theta, + confint=dml_model.confint(level=level), + joint_confint=None, + ) + + # sensitivity analysis + dml_model.sensitivity_analysis( + cf_y=self.oracle_values["cf_y"], + cf_d=self.oracle_values["cf_d"], + rho=self.oracle_values["rho"], + level=level, + null_hypothesis=theta, + ) + sensitivity_results = { + "Coverage (Lower)": theta >= dml_model.sensitivity_params["ci"]["lower"][0], + "Coverage (Upper)": theta <= dml_model.sensitivity_params["ci"]["upper"][0], + "RV": dml_model.sensitivity_params["rv"][0], + "RVa": dml_model.sensitivity_params["rva"][0], + "Bias (Lower)": abs(theta - dml_model.sensitivity_params["theta"]["lower"][0]), + "Bias (Upper)": abs(theta - dml_model.sensitivity_params["theta"]["upper"][0]), + } + # add sensitivity results to the level result coverage + level_result["coverage"].update(sensitivity_results) + + # add parameters to the result + for res in level_result.values(): + res.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Coverage (Lower)": "mean", + "Coverage (Upper)": "mean", + "RV": "mean", + "RVa": "mean", + "Bias (Lower)": "mean", + "Bias (Upper)": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData: + """Generate data for the simulation.""" + dgp_dict = make_confounded_irm_data( + n_obs=dgp_params["n_obs"], + theta=dgp_params["theta"], + gamma_a=dgp_params["gamma_a"], + beta_a=dgp_params["beta_a"], + var_epsilon_y=dgp_params["var_epsilon_y"], + trimming_threshold=dgp_params["trimming_threshold"], + linear=dgp_params["linear"], + ) + x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])] + df = pd.DataFrame( + np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])), + columns=x_cols + ["y", "d"], + ) + dml_data = dml.DoubleMLData(df, "y", "d") + return dml_data diff --git a/monte-cover/src/montecover/irm/irm_cate.py b/monte-cover/src/montecover/irm/irm_cate.py new file mode 100644 index 0000000..73d5b97 --- /dev/null +++ b/monte-cover/src/montecover/irm/irm_cate.py @@ -0,0 +1,158 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +import pandas as pd +import patsy +from doubleml.datasets import
make_heterogeneous_data +from sklearn.linear_model import LinearRegression + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class IRMCATECoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLIRM for CATE estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + # Oracle values + data_oracle = make_heterogeneous_data( + n_obs=int(1e6), + p=self.dgp_parameters["p"][0], + support_size=self.dgp_parameters["support_size"][0], + n_x=self.dgp_parameters["n_x"][0], + binary_treatment=True, + ) + + self.logger.info("Calculating oracle values") + + design_matrix_oracle = patsy.dmatrix("bs(x, df=5, degree=2)", {"x": data_oracle["data"]["X_0"]}) + spline_basis_oracle = pd.DataFrame(design_matrix_oracle) + oracle_model = LinearRegression() + oracle_model.fit(spline_basis_oracle, data_oracle["effects"]) + + # evaluate on grid + grid = {"x": np.linspace(0.1, 0.9, 100)} + spline_grid_oracle = pd.DataFrame(patsy.build_design_matrices([design_matrix_oracle.design_info], grid)[0]) + oracle_cates = oracle_model.predict(spline_grid_oracle) + + self.oracle_values = dict() + self.oracle_values["cates"] = oracle_cates + self.oracle_values["grid"] = grid + + self.logger.info(f"Oracle values: {self.oracle_values}") + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + + # Model + dml_model = dml.DoubleMLIRM( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + ) + dml_model.fit() + + # cate + design_matrix = patsy.dmatrix("bs(x, df=5, degree=2)", {"x": dml_data.data["X_0"]}) + spline_basis = pd.DataFrame(design_matrix) + cate_model = dml_model.cate(basis=spline_basis) + + # evaluation spline basis + spline_grid = pd.DataFrame(patsy.build_design_matrices([design_matrix.design_info], self.oracle_values["grid"])[0]) + + result = { + "coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + confint = cate_model.confint(basis=spline_grid, level=level) + effects = confint["effect"] + uniform_confint = cate_model.confint(basis=spline_grid, level=level, joint=True, n_rep_boot=2000) + level_result["coverage"] = self._compute_coverage( + thetas=effects, + oracle_thetas=self.oracle_values["cates"], + confint=confint.iloc[:, [0, 2]], + joint_confint=uniform_confint.iloc[:, [0, 2]], + ) + + # add parameters to the result + for res_metric in level_result.values(): +
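# attach the learner configuration and confidence level to each metric row +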
diff --git a/monte-cover/src/montecover/irm/irm_gate.py b/monte-cover/src/montecover/irm/irm_gate.py
new file mode 100644
index 0000000..64f72d3
--- /dev/null
+++ b/monte-cover/src/montecover/irm/irm_gate.py
@@ -0,0 +1,157 @@
+from typing import Any, Dict, Optional
+
+import doubleml as dml
+import numpy as np
+import pandas as pd
+from doubleml.datasets import make_heterogeneous_data
+
+from montecover.base import BaseSimulation
+from montecover.utils import create_learner_from_config
+
+
+class IRMGATECoverageSimulation(BaseSimulation):
+    """Simulation class for coverage properties of DoubleMLIRM for GATE estimation."""
+
+    def __init__(
+        self,
+        config_file: str,
+        suppress_warnings: bool = True,
+        log_level: str = "INFO",
+        log_file: Optional[str] = None,
+    ):
+        super().__init__(
+            config_file=config_file,
+            suppress_warnings=suppress_warnings,
+            log_level=log_level,
+            log_file=log_file,
+        )
+
+        # Calculate oracle values
+        self._calculate_oracle_values()
+
+    def _process_config_parameters(self):
+        """Process simulation-specific parameters from config"""
+        # Process ML models in parameter grid
+        assert "learners" in self.dml_parameters, "No learners specified in the config file"
+
+        required_learners = ["ml_g", "ml_m"]
+        for learner in self.dml_parameters["learners"]:
+            for ml in required_learners:
+                assert ml in learner, f"No {ml} specified in the config file"
+
+    def _generate_groups(self, data):
+        """Generate groups for the simulation."""
+        groups = pd.DataFrame(
+            np.column_stack(
+                (
+                    data["X_0"] <= 0.3,
+                    (data["X_0"] > 0.3) & (data["X_0"] <= 0.7),
+                    data["X_0"] > 0.7,
+                )
+            ),
+            columns=["Group 1", "Group 2", "Group 3"],
+        )
+        return groups
+
+    def _calculate_oracle_values(self):
+        """Calculate oracle values for the simulation."""
+        # Oracle values
+        data_oracle = make_heterogeneous_data(
+            n_obs=int(1e6),
+            p=self.dgp_parameters["p"][0],
+            support_size=self.dgp_parameters["support_size"][0],
+            n_x=self.dgp_parameters["n_x"][0],
+            binary_treatment=True,
+        )
+
+        self.logger.info("Calculating oracle values")
+        groups = self._generate_groups(data_oracle["data"])
+        oracle_gates = [data_oracle["effects"][groups[group]].mean() for group in groups.columns]
+
+        self.oracle_values = dict()
+        self.oracle_values["gates"] = oracle_gates
+
self.logger.info(f"Oracle values: {self.oracle_values}") + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + + # Model + dml_model = dml.DoubleMLIRM( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + ) + dml_model.fit() + + # gate + groups = self._generate_groups(dml_data.data) + gate_model = dml_model.gate(groups=groups) + + result = { + "coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + confint = gate_model.confint(level=level) + effects = confint["effect"] + uniform_confint = gate_model.confint(level=0.95, joint=True, n_rep_boot=2000) + level_result["coverage"] = self._compute_coverage( + thetas=effects, + oracle_thetas=self.oracle_values["gates"], + confint=confint.iloc[:, [0, 2]], + joint_confint=uniform_confint.iloc[:, [0, 2]], + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Uniform Coverage": "mean", + "Uniform CI Length": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params) -> dml.DoubleMLData: + """Generate data for the simulation.""" + data = make_heterogeneous_data( + n_obs=dgp_params["n_obs"], + p=dgp_params["p"], + support_size=dgp_params["support_size"], + n_x=dgp_params["n_x"], + binary_treatment=True, + ) + dml_data = dml.DoubleMLData(data["data"], "y", "d") + return dml_data diff --git a/monte-cover/src/montecover/irm/lpq.py b/monte-cover/src/montecover/irm/lpq.py new file mode 100644 index 0000000..86b66f3 --- /dev/null +++ b/monte-cover/src/montecover/irm/lpq.py @@ -0,0 +1,233 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +import pandas as pd + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +# define loc-scale model +def f_loc(D, X, X_conf): + loc = 0.5 * D + 2 * D * X[:, 4] + 2.0 * (X[:, 1] > 0.1) - 1.7 * (X[:, 0] * X[:, 2] > 0) - 3 * X[:, 3] - 2 * X_conf[:, 0] + return loc + + +def f_scale(D, X, X_conf): + scale = np.sqrt(0.5 * D + 3 * D * X[:, 0] + 0.4 * X_conf[:, 0] + 2) + return scale + + +def generate_treatment(Z, X, X_conf): + eta = np.random.normal(size=len(Z)) + d = ((0.5 * Z - 0.3 * X[:, 0] + 0.7 * X_conf[:, 0] + eta) > 0) * 1.0 + return d + + +def dgp(n=200, p=5): + X = np.random.uniform(0, 1, size=[n, p]) + X_conf = np.random.uniform(-1, 1, size=[n, 1]) + Z = np.random.binomial(1, p=0.5, size=n) + D = 
generate_treatment(Z, X, X_conf) + epsilon = np.random.normal(size=n) + + Y = f_loc(D, X, X_conf) + f_scale(D, X, X_conf) * epsilon + + return Y, X, D, Z + + +class LPQCoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLQTE for local potential quantile estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + # Parameters + n_true = int(10e6) + tau_vec = self.dml_parameters["tau_vec"][0] + p = self.dgp_parameters["dim_x"][0] + + X_true = np.random.uniform(0, 1, size=[n_true, p]) + X_conf_true = np.random.uniform(-1, 1, size=[n_true, 1]) + Z_true = np.random.binomial(1, p=0.5, size=n_true) + D1_true = generate_treatment(np.ones_like(Z_true), X_true, X_conf_true) + D0_true = generate_treatment(np.zeros_like(Z_true), X_true, X_conf_true) + epsilon_true = np.random.normal(size=n_true) + + compliers = (D1_true == 1) * (D0_true == 0) + self.logger.info(f"Compliance probability: {str(compliers.mean())}") + n_compliers = compliers.sum() + Y1 = ( + f_loc(np.ones(n_compliers), X_true[compliers, :], X_conf_true[compliers, :]) + + f_scale(np.ones(n_compliers), X_true[compliers, :], X_conf_true[compliers, :]) * epsilon_true[compliers] + ) + Y0 = ( + f_loc(np.zeros(n_compliers), X_true[compliers, :], X_conf_true[compliers, :]) + + f_scale(np.zeros(n_compliers), X_true[compliers, :], X_conf_true[compliers, :]) * epsilon_true[compliers] + ) + + Y0_quant = np.quantile(Y0, q=tau_vec) + Y1_quant = np.quantile(Y1, q=tau_vec) + effect_quant = Y1_quant - Y0_quant + + self.oracle_values = dict() + self.oracle_values["Y0_quant"] = Y0_quant + self.oracle_values["Y1_quant"] = Y1_quant + self.oracle_values["effect_quant"] = effect_quant + + self.logger.info(f"Oracle values: {self.oracle_values}") + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + tau_vec = dml_params["tau_vec"] + trimming_threshold = dml_params["trimming_threshold"] + Y0_quant = self.oracle_values["Y0_quant"] + Y1_quant = self.oracle_values["Y1_quant"] + effect_quant = self.oracle_values["effect_quant"] + + # Model + dml_model = dml.DoubleMLQTE( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + score="LPQ", + quantiles=tau_vec, + trimming_threshold=trimming_threshold, + ) + dml_model.fit() + dml_model.bootstrap(n_rep_boot=2000) + + result = { + "Y0_coverage": [], + "Y1_coverage": [], + "effect_coverage": [], + } + for level in 
self.confidence_parameters["level"]: + level_result = dict() + level_result["effect_coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=effect_quant, + confint=dml_model.confint(level=level), + joint_confint=dml_model.confint(level=level, joint=True), + ) + + Y0_estimates = np.full(len(tau_vec), np.nan) + Y1_estimates = np.full(len(tau_vec), np.nan) + + Y0_confint = np.full((len(tau_vec), 2), np.nan) + Y1_confint = np.full((len(tau_vec), 2), np.nan) + + for tau_idx in range(len(tau_vec)): + model_Y0 = dml_model.modellist_0[tau_idx] + model_Y1 = dml_model.modellist_1[tau_idx] + + Y0_estimates[tau_idx] = model_Y0.coef + Y1_estimates[tau_idx] = model_Y1.coef + + Y0_confint[tau_idx, :] = model_Y0.confint(level=level) + Y1_confint[tau_idx, :] = model_Y1.confint(level=level) + + Y0_confint_df = pd.DataFrame(Y0_confint, columns=["lower", "upper"]) + Y1_confint_df = pd.DataFrame(Y1_confint, columns=["lower", "upper"]) + + level_result["Y0_coverage"] = self._compute_coverage( + thetas=Y0_estimates, + oracle_thetas=Y0_quant, + confint=Y0_confint_df, + joint_confint=None, + ) + + level_result["Y1_coverage"] = self._compute_coverage( + thetas=Y1_estimates, + oracle_thetas=Y1_quant, + confint=Y1_confint_df, + joint_confint=None, + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "repetition": "count", + } + + result_summary = dict() + # Aggregate results for Y0 and Y1 + for result_name in ["Y0_coverage", "Y1_coverage"]: + df = self.results[result_name] + result_summary[result_name] = df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + uniform_aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Uniform Coverage": "mean", + "Uniform CI Length": "mean", + "repetition": "count", + } + result_summary["effect_coverage"] = ( + self.results["effect_coverage"].groupby(groupby_cols).agg(uniform_aggregation_dict).reset_index() + ) + self.logger.debug("Summarized effect_coverage results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + Y, X, D, Z = dgp(n=dgp_params["n_obs"], p=dgp_params["dim_x"]) + dml_data = dml.DoubleMLData.from_arrays(X, Y, D, Z) + return dml_data diff --git a/monte-cover/src/montecover/irm/pq.py b/monte-cover/src/montecover/irm/pq.py new file mode 100644 index 0000000..f935dc3 --- /dev/null +++ b/monte-cover/src/montecover/irm/pq.py @@ -0,0 +1,212 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +import numpy as np +import pandas as pd + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +# define loc-scale model +def f_loc(D, X): + loc = 0.5 * D + 2 * D * X[:, 4] + 2.0 * (X[:, 1] > 0.1) - 1.7 * (X[:, 0] * X[:, 2] > 0) - 3 * X[:, 3] + return loc + + +def f_scale(D, X): + scale = np.sqrt(0.5 * D + 0.3 * D * X[:, 1] + 2) + return scale + + +def dgp(n=200, p=5): + X = 
np.random.uniform(-1, 1, size=[n, p]) + D = ((X[:, 1] - X[:, 3] + 1.5 * (X[:, 0] > 0) + np.random.normal(size=n)) > 0) * 1.0 + epsilon = np.random.normal(size=n) + + Y = f_loc(D, X) + f_scale(D, X) * epsilon + return Y, X, D, epsilon + + +class PQCoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLPQ for potential quantile estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + # Parameters + n_true = int(10e6) + tau_vec = self.dml_parameters["tau_vec"][0] + p = self.dgp_parameters["dim_x"][0] + + _, X_true, _, epsilon_true = dgp(n=n_true, p=p) + D1 = np.ones(n_true) + D0 = np.zeros(n_true) + + Y1 = f_loc(D1, X_true) + f_scale(D1, X_true) * epsilon_true + Y0 = f_loc(D0, X_true) + f_scale(D0, X_true) * epsilon_true + + Y1_quant = np.quantile(Y1, q=tau_vec) + Y0_quant = np.quantile(Y0, q=tau_vec) + effect_quant = Y1_quant - Y0_quant + + self.oracle_values = dict() + self.oracle_values["Y0_quant"] = Y0_quant + self.oracle_values["Y1_quant"] = Y1_quant + self.oracle_values["effect_quant"] = effect_quant + + self.logger.info(f"Oracle values: {self.oracle_values}") + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + tau_vec = dml_params["tau_vec"] + trimming_threshold = dml_params["trimming_threshold"] + Y0_quant = self.oracle_values["Y0_quant"] + Y1_quant = self.oracle_values["Y1_quant"] + effect_quant = self.oracle_values["effect_quant"] + + # Model + dml_model = dml.DoubleMLQTE( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + score="PQ", + quantiles=tau_vec, + trimming_threshold=trimming_threshold, + ) + dml_model.fit() + dml_model.bootstrap(n_rep_boot=2000) + + result = { + "Y0_coverage": [], + "Y1_coverage": [], + "effect_coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["effect_coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=effect_quant, + confint=dml_model.confint(level=level), + joint_confint=dml_model.confint(level=level, joint=True), + ) + + Y0_estimates = np.full(len(tau_vec), np.nan) + Y1_estimates = np.full(len(tau_vec), np.nan) + + Y0_confint = np.full((len(tau_vec), 2), np.nan) + Y1_confint = np.full((len(tau_vec), 2), np.nan) + + for tau_idx in range(len(tau_vec)): + model_Y0 = dml_model.modellist_0[tau_idx] + model_Y1 = dml_model.modellist_1[tau_idx] + + Y0_estimates[tau_idx] 
= model_Y0.coef + Y1_estimates[tau_idx] = model_Y1.coef + + Y0_confint[tau_idx, :] = model_Y0.confint(level=level) + Y1_confint[tau_idx, :] = model_Y1.confint(level=level) + + Y0_confint_df = pd.DataFrame(Y0_confint, columns=["lower", "upper"]) + Y1_confint_df = pd.DataFrame(Y1_confint, columns=["lower", "upper"]) + + level_result["Y0_coverage"] = self._compute_coverage( + thetas=Y0_estimates, + oracle_thetas=Y0_quant, + confint=Y0_confint_df, + joint_confint=None, + ) + + level_result["Y1_coverage"] = self._compute_coverage( + thetas=Y1_estimates, + oracle_thetas=Y1_quant, + confint=Y1_confint_df, + joint_confint=None, + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "repetition": "count", + } + + result_summary = dict() + # Aggregate results for Y0 and Y1 + for result_name in ["Y0_coverage", "Y1_coverage"]: + df = self.results[result_name] + result_summary[result_name] = df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + uniform_aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Uniform Coverage": "mean", + "Uniform CI Length": "mean", + "repetition": "count", + } + result_summary["effect_coverage"] = ( + self.results["effect_coverage"].groupby(groupby_cols).agg(uniform_aggregation_dict).reset_index() + ) + self.logger.debug("Summarized effect_coverage results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + Y, X, D, _ = dgp(n=dgp_params["n_obs"], p=dgp_params["dim_x"]) + dml_data = dml.DoubleMLData.from_arrays(X, Y, D) + return dml_data diff --git a/monte-cover/src/montecover/ssm/__init__.py b/monte-cover/src/montecover/ssm/__init__.py new file mode 100644 index 0000000..86d02b5 --- /dev/null +++ b/monte-cover/src/montecover/ssm/__init__.py @@ -0,0 +1,9 @@ +"""Monte Carlo coverage simulations for SSM.""" + +from montecover.ssm.ssm_mar_ate import SSMMarATECoverageSimulation +from montecover.ssm.ssm_nonig_ate import SSMNonIgnorableATECoverageSimulation + +__all__ = [ + "SSMMarATECoverageSimulation", + "SSMNonIgnorableATECoverageSimulation", +] diff --git a/monte-cover/src/montecover/ssm/ssm_mar_ate.py b/monte-cover/src/montecover/ssm/ssm_mar_ate.py new file mode 100644 index 0000000..ef86363 --- /dev/null +++ b/monte-cover/src/montecover/ssm/ssm_mar_ate.py @@ -0,0 +1,123 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +from doubleml.datasets import make_ssm_data + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class SSMMarATECoverageSimulation(BaseSimulation): + """Simulation class for coverage properties of DoubleMLSSM with missing at random for ATE estimation.""" + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + 
suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m", "ml_pi"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + self.oracle_values = dict() + self.oracle_values["theta"] = self.dgp_parameters["theta"] + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + learner_pi_name, ml_pi = create_learner_from_config(learner_config["ml_pi"]) + + # Model + dml_model = dml.DoubleMLSSM( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + ml_pi=ml_pi, + score="missing-at-random", + ) + dml_model.fit() + + result = { + "coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=self.oracle_values["theta"], + confint=dml_model.confint(level=level), + joint_confint=None, + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "Learner pi": learner_pi_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "Learner pi", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + data = make_ssm_data( + theta=dgp_params["theta"], + n_obs=dgp_params["n_obs"], + dim_x=dgp_params["dim_x"], + mar=True, + return_type="DataFrame", + ) + dml_data = dml.DoubleMLData(data, "y", "d", s_col="s") + return dml_data diff --git a/monte-cover/src/montecover/ssm/ssm_nonig_ate.py b/monte-cover/src/montecover/ssm/ssm_nonig_ate.py new file mode 100644 index 0000000..8c82f29 --- /dev/null +++ b/monte-cover/src/montecover/ssm/ssm_nonig_ate.py @@ -0,0 +1,125 @@ +from typing import Any, Dict, Optional + +import doubleml as dml +from doubleml.datasets import make_ssm_data + +from montecover.base import BaseSimulation +from montecover.utils import create_learner_from_config + + +class 
SSMNonIgnorableATECoverageSimulation(BaseSimulation): + """ + Simulation class for coverage properties of DoubleMLSSM with nonignorable nonresponse for ATE estimation. + """ + + def __init__( + self, + config_file: str, + suppress_warnings: bool = True, + log_level: str = "INFO", + log_file: Optional[str] = None, + ): + super().__init__( + config_file=config_file, + suppress_warnings=suppress_warnings, + log_level=log_level, + log_file=log_file, + ) + + # Calculate oracle values + self._calculate_oracle_values() + + def _process_config_parameters(self): + """Process simulation-specific parameters from config""" + # Process ML models in parameter grid + assert "learners" in self.dml_parameters, "No learners specified in the config file" + + required_learners = ["ml_g", "ml_m", "ml_pi"] + for learner in self.dml_parameters["learners"]: + for ml in required_learners: + assert ml in learner, f"No {ml} specified in the config file" + + def _calculate_oracle_values(self): + """Calculate oracle values for the simulation.""" + self.logger.info("Calculating oracle values") + + self.oracle_values = dict() + self.oracle_values["theta"] = self.dgp_parameters["theta"] + + def run_single_rep(self, dml_data: dml.DoubleMLData, dml_params: Dict[str, Any]) -> Dict[str, Any]: + """Run a single repetition with the given parameters.""" + # Extract parameters + learner_config = dml_params["learners"] + learner_g_name, ml_g = create_learner_from_config(learner_config["ml_g"]) + learner_m_name, ml_m = create_learner_from_config(learner_config["ml_m"]) + learner_pi_name, ml_pi = create_learner_from_config(learner_config["ml_pi"]) + + # Model + dml_model = dml.DoubleMLSSM( + obj_dml_data=dml_data, + ml_g=ml_g, + ml_m=ml_m, + ml_pi=ml_pi, + score="nonignorable", + ) + dml_model.fit() + + result = { + "coverage": [], + } + for level in self.confidence_parameters["level"]: + level_result = dict() + level_result["coverage"] = self._compute_coverage( + thetas=dml_model.coef, + oracle_thetas=self.oracle_values["theta"], + confint=dml_model.confint(level=level), + joint_confint=None, + ) + + # add parameters to the result + for res_metric in level_result.values(): + res_metric.update( + { + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "Learner pi": learner_pi_name, + "level": level, + } + ) + for key, res in level_result.items(): + result[key].append(res) + + return result + + def summarize_results(self): + """Summarize the simulation results.""" + self.logger.info("Summarizing simulation results") + + # Group by parameter combinations + groupby_cols = ["Learner g", "Learner m", "Learner pi", "level"] + aggregation_dict = { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "repetition": "count", + } + + # Aggregate results (possibly multiple result dfs) + result_summary = dict() + for result_name, result_df in self.results.items(): + result_summary[result_name] = result_df.groupby(groupby_cols).agg(aggregation_dict).reset_index() + self.logger.debug(f"Summarized {result_name} results") + + return result_summary + + def _generate_dml_data(self, dgp_params: Dict[str, Any]) -> dml.DoubleMLData: + """Generate data for the simulation.""" + data = make_ssm_data( + theta=dgp_params["theta"], + n_obs=dgp_params["n_obs"], + dim_x=dgp_params["dim_x"], + mar=False, + return_type="DataFrame", + ) + dml_data = dml.DoubleMLData(data, "y", "d", z_cols="z", s_col="s") + return dml_data diff --git a/results/irm/apo_config.yml b/results/irm/apo_config.yml new file mode 100644 index 0000000..5f31101 --- 
/dev/null +++ b/results/irm/apo_config.yml @@ -0,0 +1,49 @@ +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + n_obs: + - 500 + n_levels: + - 2 + linear: + - true +learner_definitions: + linear: &id001 + name: Linear + logit: &id002 + name: Logistic + lgbmr: &id003 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + lgbmc: &id004 + name: LGBM Clas. + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 +dml_parameters: + treatment_level: + - 0 + - 1 + - 2 + trimming_threshold: + - 0.01 + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id004 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/apo_coverage.csv b/results/irm/apo_coverage.csv new file mode 100644 index 0000000..c352967 --- /dev/null +++ b/results/irm/apo_coverage.csv @@ -0,0 +1,25 @@ +Learner g,Learner m,Treatment Level,level,Coverage,CI Length,Bias,repetition +LGBM Regr.,LGBM Clas.,0,0.9,0.923,8.535462056273152,2.0255827803361752,1000 +LGBM Regr.,LGBM Clas.,0,0.95,0.968,10.170630351290903,2.0255827803361752,1000 +LGBM Regr.,LGBM Clas.,1,0.9,0.915,34.31276092012859,8.463655678765429,1000 +LGBM Regr.,LGBM Clas.,1,0.95,0.969,40.886176442476604,8.463655678765429,1000 +LGBM Regr.,LGBM Clas.,2,0.9,0.901,33.644464659560505,8.44195706768521,1000 +LGBM Regr.,LGBM Clas.,2,0.95,0.953,40.08985233177513,8.44195706768521,1000 +LGBM Regr.,Logistic,0,0.9,0.913,5.611262078064704,1.3129445023644262,1000 +LGBM Regr.,Logistic,0,0.95,0.96,6.686231164049146,1.3129445023644262,1000 +LGBM Regr.,Logistic,1,0.9,0.906,7.131142409013338,1.601689163860689,1000 +LGBM Regr.,Logistic,1,0.95,0.952,8.497280281526653,1.601689163860689,1000 +LGBM Regr.,Logistic,2,0.9,0.926,7.123330875617923,1.5793207930346633,1000 +LGBM Regr.,Logistic,2,0.95,0.961,8.487972265379696,1.5793207930346633,1000 +Linear,LGBM Clas.,0,0.9,0.91,5.450702479855432,1.2788345107461965,1000 +Linear,LGBM Clas.,0,0.95,0.953,6.494912602502952,1.2788345107461965,1000 +Linear,LGBM Clas.,1,0.9,0.934,9.871742629461385,2.0208159577876965,1000 +Linear,LGBM Clas.,1,0.95,0.977,11.762906863787386,2.0208159577876965,1000 +Linear,LGBM Clas.,2,0.9,0.935,7.196139854742809,1.5705772035604926,1000 +Linear,LGBM Clas.,2,0.95,0.971,8.574729515081229,1.5705772035604926,1000 +Linear,Logistic,0,0.9,0.915,5.333252473490304,1.262628265253654,1000 +Linear,Logistic,0,0.95,0.951,6.354962287965668,1.262628265253654,1000 +Linear,Logistic,1,0.9,0.907,5.409257288463179,1.2841258169966927,1000 +Linear,Logistic,1,0.95,0.947,6.445527610957087,1.2841258169966927,1000 +Linear,Logistic,2,0.9,0.909,5.362180231077127,1.2686475779103141,1000 +Linear,Logistic,2,0.95,0.949,6.389431837167295,1.2686475779103141,1000 diff --git a/results/irm/apo_metadata.csv b/results/irm/apo_metadata.csv new file mode 100644 index 0000000..601c446 --- /dev/null +++ b/results/irm/apo_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,APOCoverageSimulation,2025-06-04 15:38,79.44047049681346,3.12.3,scripts/irm/apo_config.yml diff --git a/results/irm/apos_causal_contrast.csv b/results/irm/apos_causal_contrast.csv new file mode 100644 index 0000000..5bc012d --- /dev/null +++ b/results/irm/apos_causal_contrast.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition +LGBM Regr.,LGBM 
Clas.,0.9,0.9035,33.48965631859352,8.5048896967406,0.918,39.64896208370754,1000 +LGBM Regr.,LGBM Clas.,0.95,0.9575,39.905386815920046,8.5048896967406,0.967,45.48932863359306,1000 +LGBM Regr.,Logistic,0.9,0.951,5.329365839934695,1.0762013117643536,0.951,6.300702382026109,1000 +LGBM Regr.,Logistic,0.95,0.9775,6.3503310784371845,1.0762013117643536,0.979,7.232560605885723,1000 +Linear,LGBM Clas.,0.9,0.965,6.721615473453926,1.3234802376469883,0.977,7.963378099405947,1000 +Linear,LGBM Clas.,0.95,0.989,8.009298839747467,1.3234802376469883,0.996,9.138185238253229,1000 +Linear,Logistic,0.9,0.8675,1.146829074707023,0.30695471324696894,0.836,1.3542396130508736,1000 +Linear,Logistic,0.95,0.918,1.366531128374668,0.30695471324696894,0.912,1.5554413731229015,1000 diff --git a/results/irm/apos_config.yml b/results/irm/apos_config.yml new file mode 100644 index 0000000..40be90e --- /dev/null +++ b/results/irm/apos_config.yml @@ -0,0 +1,49 @@ +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + n_obs: + - 500 + n_levels: + - 2 + linear: + - true +learner_definitions: + linear: &id001 + name: Linear + logit: &id002 + name: Logistic + lgbmr: &id003 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + lgbmc: &id004 + name: LGBM Clas. + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 +dml_parameters: + treatment_levels: + - - 0 + - 1 + - 2 + trimming_threshold: + - 0.01 + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id004 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/apos_coverage.csv b/results/irm/apos_coverage.csv new file mode 100644 index 0000000..4e630ec --- /dev/null +++ b/results/irm/apos_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition +LGBM Regr.,LGBM Clas.,0.9,0.9216666666666666,25.302332120467288,6.206005134767613,0.926,32.42413576207192,1000 +LGBM Regr.,LGBM Clas.,0.95,0.9623333333333334,30.149588308899382,6.206005134767613,0.975,36.66314428285753,1000 +LGBM Regr.,Logistic,0.9,0.9166666666666666,6.604807969876808,1.492507532099351,0.925,8.124328671640002,1000 +LGBM Regr.,Logistic,0.95,0.963,7.870114114502647,1.492507532099351,0.963,9.2992358185068,1000 +Linear,LGBM Clas.,0.9,0.927,7.536037974003498,1.6449388088327628,0.936,9.335529567192953,1000 +Linear,LGBM Clas.,0.95,0.968,8.979743104891375,1.6449388088327628,0.974,10.660074664816488,1000 +Linear,Logistic,0.9,0.9056666666666666,5.378673132414481,1.2747688188643604,0.907,5.79782063135373,1000 +Linear,Logistic,0.95,0.9536666666666667,6.409084341251623,1.2747688188643604,0.953,6.82033780870828,1000 diff --git a/results/irm/apos_metadata.csv b/results/irm/apos_metadata.csv new file mode 100644 index 0000000..f463b04 --- /dev/null +++ b/results/irm/apos_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,APOSCoverageSimulation,2025-06-05 07:37,6.892837846279145,3.12.9,scripts/irm/apos_config.yml diff --git a/results/irm/cvar_Y0_coverage.csv b/results/irm/cvar_Y0_coverage.csv new file mode 100644 index 0000000..4c2e567 --- /dev/null +++ b/results/irm/cvar_Y0_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,repetition +LGBM Regr.,LGBM Clas.,0.9,0.85,0.5670142676988336,0.15380806524383983,200 +LGBM Regr.,LGBM 
Clas.,0.95,0.917142857142857,0.6756391725078731,0.15380806524383983,200 +LGBM Regr.,Logistic,0.9,0.7985714285714286,0.4385225256106473,0.13981066432860312,200 +LGBM Regr.,Logistic,0.95,0.8857142857142857,0.5225318183474872,0.13981066432860312,200 +Linear,LGBM Clas.,0.9,0.807142857142857,0.5780831437073561,0.16505729067291136,200 +Linear,LGBM Clas.,0.95,0.8778571428571429,0.6888285517757731,0.16505729067291136,200 +Linear,Logistic,0.9,0.7535714285714286,0.46127673668330255,0.14620789056223912,200 +Linear,Logistic,0.95,0.8271428571428571,0.5496451331545211,0.14620789056223912,200 diff --git a/results/irm/cvar_Y1_coverage.csv b/results/irm/cvar_Y1_coverage.csv new file mode 100644 index 0000000..5319243 --- /dev/null +++ b/results/irm/cvar_Y1_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,repetition +LGBM Regr.,LGBM Clas.,0.9,0.9214285714285714,0.1913460262511472,0.0431261794473061,200 +LGBM Regr.,LGBM Clas.,0.95,0.9592857142857143,0.22800285319744668,0.0431261794473061,200 +LGBM Regr.,Logistic,0.9,0.9192857142857143,0.18097901112163345,0.041041888975454605,200 +LGBM Regr.,Logistic,0.95,0.9614285714285714,0.2156497927499421,0.041041888975454605,200 +Linear,LGBM Clas.,0.9,0.9164285714285714,0.2132575331275393,0.046942310613367344,200 +Linear,LGBM Clas.,0.95,0.9621428571428571,0.2541120240203392,0.046942310613367344,200 +Linear,Logistic,0.9,0.9228571428571429,0.1968307741542601,0.04490834814057729,200 +Linear,Logistic,0.95,0.9557142857142857,0.23453833342391914,0.04490834814057729,200 diff --git a/results/irm/cvar_config.yml b/results/irm/cvar_config.yml new file mode 100644 index 0000000..5157d7e --- /dev/null +++ b/results/irm/cvar_config.yml @@ -0,0 +1,65 @@ +simulation_parameters: + repetitions: 200 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + n_obs: + - 5000 + dim_x: + - 5 +learner_definitions: + linear: &id001 + name: Linear + logit: &id002 + name: Logistic + lgbmr: &id003 + name: LGBM Regr. + params: + n_estimators: 200 + learning_rate: 0.05 + num_leaves: 15 + max_depth: 5 + min_child_samples: 10 + subsample: 0.9 + colsample_bytree: 0.9 + reg_alpha: 0.0 + reg_lambda: 0.1 + random_state: 42 + lgbmc: &id004 + name: LGBM Clas. 
+ params: + n_estimators: 200 + learning_rate: 0.05 + num_leaves: 15 + max_depth: 5 + min_child_samples: 10 + subsample: 0.9 + colsample_bytree: 0.9 + reg_alpha: 0.0 + reg_lambda: 0.1 + random_state: 42 +dml_parameters: + tau_vec: + - - 0.2 + - 0.3 + - 0.4 + - 0.5 + - 0.6 + - 0.7 + - 0.8 + trimming_threshold: + - 0.01 + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id004 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/cvar_effect_coverage.csv b/results/irm/cvar_effect_coverage.csv new file mode 100644 index 0000000..c54c59b --- /dev/null +++ b/results/irm/cvar_effect_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition +LGBM Regr.,LGBM Clas.,0.9,0.85,0.5796241888849147,0.15567372871154336,0.87,0.7009270285655491,200 +LGBM Regr.,LGBM Clas.,0.95,0.9292857142857143,0.6906648203634905,0.15567372871154336,0.92,0.8074443628687097,200 +LGBM Regr.,Logistic,0.9,0.8207142857142857,0.4505521904112434,0.1384841777144103,0.795,0.5426740272513005,200 +LGBM Regr.,Logistic,0.95,0.8928571428571429,0.5368660480740288,0.1384841777144103,0.88,0.6261735233895037,200 +Linear,LGBM Clas.,0.9,0.825,0.604031305594578,0.17336373457185203,0.78,0.7151775160876557,200 +Linear,LGBM Clas.,0.95,0.8971428571428571,0.719747693716827,0.17336373457185203,0.85,0.8269744576928215,200 +Linear,Logistic,0.9,0.775,0.4860331654639814,0.1489380406801104,0.74,0.5698020622218235,200 +Linear,Logistic,0.95,0.85,0.5791442375130592,0.1489380406801104,0.81,0.6599452421128806,200 diff --git a/results/irm/cvar_metadata.csv b/results/irm/cvar_metadata.csv new file mode 100644 index 0000000..63df236 --- /dev/null +++ b/results/irm/cvar_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,CVARCoverageSimulation,2025-06-05 12:27,9.002358218034109,3.12.9,scripts/irm/cvar_config.yml diff --git a/results/irm/irm_ate_config.yml b/results/irm/irm_ate_config.yml new file mode 100644 index 0000000..d19a50a --- /dev/null +++ b/results/irm/irm_ate_config.yml @@ -0,0 +1,61 @@ +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + theta: + - 0.5 + n_obs: + - 500 + dim_x: + - 20 +learner_definitions: + lasso: &id001 + name: LassoCV + logit: &id002 + name: Logistic + rfr: &id003 + name: RF Regr. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + rfc: &id004 + name: RF Clas. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + lgbmr: &id005 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + lgbmc: &id006 + name: LGBM Clas. 
+ params: + n_estimators: 500 + learning_rate: 0.01 +dml_parameters: + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id001 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id005 + ml_m: *id006 + - ml_g: *id005 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id006 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/irm_ate_coverage.csv b/results/irm/irm_ate_coverage.csv index 9ed9b19..69935e2 100644 --- a/results/irm/irm_ate_coverage.csv +++ b/results/irm/irm_ate_coverage.csv @@ -1,9 +1,15 @@ Learner g,Learner m,level,Coverage,CI Length,Bias,repetition -Lasso,Logistic Regression,0.9,0.875,0.467771360497192,0.12336884150536215,1000 -Lasso,Logistic Regression,0.95,0.935,0.5573839547492131,0.12336884150536215,1000 -Lasso,Random Forest,0.9,0.904,0.5986207495475606,0.14643826768714088,1000 -Lasso,Random Forest,0.95,0.954,0.7133006185396007,0.14643826768714088,1000 -Random Forest,Logistic Regression,0.9,0.802,0.515856329530175,0.1497402299287362,1000 -Random Forest,Logistic Regression,0.95,0.885,0.6146807293424889,0.1497402299287362,1000 -Random Forest,Random Forest,0.9,0.898,0.625184528129902,0.1492705643144829,1000 -Random Forest,Random Forest,0.95,0.948,0.744953313017455,0.1492705643144829,1000 +LGBM Regr.,LGBM Clas.,0.9,0.934,1.2170294870742426,0.28949513855358994,1000 +LGBM Regr.,LGBM Clas.,0.95,0.974,1.4501800790686068,0.28949513855358994,1000 +LGBM Regr.,Logistic,0.9,0.909,0.764475766453259,0.1851430173917419,1000 +LGBM Regr.,Logistic,0.95,0.955,0.9109290606478061,0.1851430173917419,1000 +LassoCV,LGBM Clas.,0.9,0.931,1.099023356166903,0.26125287479628606,1000 +LassoCV,LGBM Clas.,0.95,0.973,1.30956710126707,0.26125287479628606,1000 +LassoCV,Logistic,0.9,0.912,0.6518264483447356,0.15950540890700682,1000 +LassoCV,Logistic,0.95,0.962,0.7766991189934198,0.15950540890700682,1000 +LassoCV,RF Clas.,0.9,0.921,0.575659955473877,0.1328787221360119,1000 +LassoCV,RF Clas.,0.95,0.965,0.6859411449040855,0.1328787221360119,1000 +RF Regr.,Logistic,0.9,0.923,0.7334971446280483,0.1805477395572802,1000 +RF Regr.,Logistic,0.95,0.957,0.8740157559784838,0.1805477395572802,1000 +RF Regr.,RF Clas.,0.9,0.908,0.6176696649606507,0.1493303814432326,1000 +RF Regr.,RF Clas.,0.95,0.955,0.7359988012486619,0.1493303814432326,1000 diff --git a/results/irm/irm_ate_metadata.csv b/results/irm/irm_ate_metadata.csv new file mode 100644 index 0000000..75d3d5f --- /dev/null +++ b/results/irm/irm_ate_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,IRMATECoverageSimulation,2025-06-03 16:21,13.744510825475057,3.12.9,scripts/irm/irm_ate_config.yml diff --git a/results/irm/irm_ate_sensitivity.csv b/results/irm/irm_ate_sensitivity.csv deleted file mode 100644 index 6e10770..0000000 --- a/results/irm/irm_ate_sensitivity.csv +++ /dev/null @@ -1,9 +0,0 @@ -Learner g,Learner m,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition -LGBM,LGBM,0.9,0.112,0.266748233354866,0.17891290135375168,0.962,1.0,0.12379347892727971,0.05409589192160397,0.04254708028278409,0.32210978560617337,500 -LGBM,LGBM,0.95,0.318,0.31785012462427936,0.17891290135375168,0.998,1.0,0.12379347892727971,0.03441021667548556,0.04254708028278409,0.32210978560617337,500 -LGBM,Logistic Regr.,0.9,0.292,0.2577778025822409,0.14922926552528684,1.0,1.0,0.10066571951295798,0.03493291437943745,0.029012990398602386,0.2979424530565633,500 -LGBM,Logistic 
Regr.,0.95,0.548,0.30716119707955875,0.14922926552528684,1.0,1.0,0.10066571951295798,0.01869752301454861,0.029012990398602386,0.2979424530565633,500 -Linear Reg.,LGBM,0.9,0.122,0.2675665174758639,0.17873104426193565,0.964,1.0,0.12647219547900976,0.05512739569620471,0.04513946154555041,0.31857328180879246,500 -Linear Reg.,LGBM,0.95,0.314,0.31882517029399604,0.17873104426193565,0.998,1.0,0.12647219547900976,0.035017588858111126,0.04513946154555041,0.31857328180879246,500 -Linear Reg.,Logistic Regr.,0.9,0.86,0.2592281409673473,0.08970251629543106,1.0,1.0,0.06300567732617765,0.006719868195974334,0.05720312141493262,0.23496869651774063,500 -Linear Reg.,Logistic Regr.,0.95,0.974,0.30888938185760084,0.08970251629543106,1.0,1.0,0.06300567732617765,0.0014945204694376396,0.05720312141493262,0.23496869651774063,500 diff --git a/results/irm/irm_ate_sensitivity_config.yml b/results/irm/irm_ate_sensitivity_config.yml new file mode 100644 index 0000000..74143aa --- /dev/null +++ b/results/irm/irm_ate_sensitivity_config.yml @@ -0,0 +1,53 @@ +simulation_parameters: + repetitions: 500 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + theta: + - 5.0 + n_obs: + - 5000 + trimming_threshold: + - 0.05 + var_epsilon_y: + - 1.0 + linear: + - false + gamma_a: + - 0.198 + beta_a: + - 0.582 +learner_definitions: + linear: &id001 + name: Linear + logit: &id002 + name: Logistic + lgbmr: &id003 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + lgbmc: &id004 + name: LGBM Clas. + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 +dml_parameters: + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id004 + trimming_threshold: + - 0.05 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/irm_ate_sensitivity_coverage.csv b/results/irm/irm_ate_sensitivity_coverage.csv new file mode 100644 index 0000000..4a63af0 --- /dev/null +++ b/results/irm/irm_ate_sensitivity_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition +LGBM Regr.,LGBM Clas.,0.9,0.104,0.2668157568060522,0.18026525524152154,0.966,1.0,0.12473788062439613,0.05508904509343494,0.04380159419714633,0.3234512135135403,500 +LGBM Regr.,LGBM Clas.,0.95,0.276,0.31793058377898364,0.18026525524152154,1.0,1.0,0.12473788062439613,0.03540613542014128,0.04380159419714633,0.3234512135135403,500 +LGBM Regr.,Logistic,0.9,0.248,0.25763009474845106,0.14980027719730954,0.998,1.0,0.10093696287018208,0.03536055959101725,0.027371865522670537,0.2987135019044584,500 +LGBM Regr.,Logistic,0.95,0.552,0.306985192339855,0.14980027719730954,1.0,1.0,0.10093696287018208,0.01878662839660344,0.027371865522670537,0.2987135019044584,500 +Linear,LGBM Clas.,0.9,0.112,0.26715210077204105,0.17865402724192433,0.962,1.0,0.12630248546657805,0.055046351043189806,0.04436057060203107,0.31869461365134216,500 +Linear,LGBM Clas.,0.95,0.282,0.318331362333959,0.17865402724192433,0.998,1.0,0.12630248546657805,0.03487837647302397,0.04436057060203107,0.31869461365134216,500 +Linear,Logistic,0.9,0.852,0.2589605314211221,0.09016543407251673,1.0,1.0,0.06325807709105473,0.00696464260372732,0.05686871205333632,0.23561615221802584,500 +Linear,Logistic,0.95,0.978,0.30857050541538944,0.09016543407251673,1.0,1.0,0.06325807709105473,0.0015852998947931969,0.05686871205333632,0.23561615221802584,500 diff --git 
a/results/irm/irm_ate_sensitivity_metadata.csv b/results/irm/irm_ate_sensitivity_metadata.csv index 327aa8c..d859488 100644 --- a/results/irm/irm_ate_sensitivity_metadata.csv +++ b/results/irm/irm_ate_sensitivity_metadata.csv @@ -1,2 +1,2 @@ -DoubleML Version,Script,Date,Total Runtime (seconds),Python Version -0.10.dev0,irm_ate_sensitivity.py,2025-05-22 14:48:09,6858.955473899841,3.12.10 +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,IRMATESensitivityCoverageSimulation,2025-06-04 10:16,29.540068797270457,3.12.3,scripts/irm/irm_ate_sensitivity_config.yml diff --git a/results/irm/irm_atte_config.yml b/results/irm/irm_atte_config.yml new file mode 100644 index 0000000..2d3c69a --- /dev/null +++ b/results/irm/irm_atte_config.yml @@ -0,0 +1,61 @@ +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + theta: + - 0.5 + n_obs: + - 500 + dim_x: + - 20 +learner_definitions: + lasso: &id001 + name: LassoCV + logit: &id002 + name: Logistic + rfr: &id003 + name: RF Regr. + params: + n_estimators: 200 + max_features: 20 + max_depth: 20 + min_samples_leaf: 2 + rfc: &id004 + name: RF Clas. + params: + n_estimators: 200 + max_features: 20 + max_depth: 20 + min_samples_leaf: 20 + lgbmr: &id005 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + lgbmc: &id006 + name: LGBM Clas. + params: + n_estimators: 500 + learning_rate: 0.01 +dml_parameters: + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id001 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id005 + ml_m: *id006 + - ml_g: *id005 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id006 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/irm_atte_coverage.csv b/results/irm/irm_atte_coverage.csv index 5255488..082860b 100644 --- a/results/irm/irm_atte_coverage.csv +++ b/results/irm/irm_atte_coverage.csv @@ -1,9 +1,15 @@ Learner g,Learner m,level,Coverage,CI Length,Bias,repetition -Lasso,Logistic Regression,0.9,0.891,0.5331759172799808,0.1350391266439693,1000 -Lasso,Logistic Regression,0.95,0.937,0.6353182910443252,0.1350391266439693,1000 -Lasso,Random Forest,0.9,0.897,0.7382774835343612,0.18122786303688493,1000 -Lasso,Random Forest,0.95,0.948,0.8797118811149396,0.18122786303688493,1000 -Random Forest,Logistic Regression,0.9,0.872,0.5528818976331068,0.15060693615944673,1000 -Random Forest,Logistic Regression,0.95,0.918,0.6587994149202312,0.15060693615944673,1000 -Random Forest,Random Forest,0.9,0.899,0.7455843448660364,0.18245405782788798,1000 -Random Forest,Random Forest,0.95,0.948,0.8884185434072277,0.18245405782788798,1000 +LGBM Regr.,LGBM Clas.,0.9,0.935,1.4682839506185927,0.33857239275160667,1000 +LGBM Regr.,LGBM Clas.,0.95,0.974,1.7495682382536595,0.33857239275160667,1000 +LGBM Regr.,Logistic,0.9,0.903,0.827444945949616,0.20284041708199016,1000 +LGBM Regr.,Logistic,0.95,0.957,0.9859614659188063,0.20284041708199016,1000 +LassoCV,LGBM Clas.,0.9,0.916,1.364184861790926,0.3340226877296898,1000 +LassoCV,LGBM Clas.,0.95,0.968,1.625526523183968,0.3340226877296898,1000 +LassoCV,Logistic,0.9,0.913,0.7758018959411505,0.1948636425368796,1000 +LassoCV,Logistic,0.95,0.96,0.9244249763431417,0.1948636425368796,1000 +LassoCV,RF Clas.,0.9,0.892,0.5725347715806113,0.14886905941160222,1000 +LassoCV,RF Clas.,0.95,0.94,0.6822172586107998,0.14886905941160222,1000 +RF Regr.,Logistic,0.9,0.899,0.8139922164772362,0.2045901899504402,1000 +RF 
Regr.,Logistic,0.95,0.952,0.9699315500481204,0.2045901899504402,1000 +RF Regr.,RF Clas.,0.9,0.885,0.5863252811302729,0.15402934863308917,1000 +RF Regr.,RF Clas.,0.95,0.93,0.6986496642686135,0.15402934863308917,1000 diff --git a/results/irm/irm_atte_metadata.csv b/results/irm/irm_atte_metadata.csv new file mode 100644 index 0000000..92114f4 --- /dev/null +++ b/results/irm/irm_atte_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,IRMATTECoverageSimulation,2025-06-03 16:07,13.51489497423172,3.12.9,scripts/irm/irm_atte_config.yml diff --git a/results/irm/irm_atte_sensitivity.csv b/results/irm/irm_atte_sensitivity.csv deleted file mode 100644 index 7737483..0000000 --- a/results/irm/irm_atte_sensitivity.csv +++ /dev/null @@ -1,9 +0,0 @@ -Learner g,Learner m,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition -LGBM,LGBM,0.9,0.702,0.348892741328716,0.1353547312940485,0.95,1.0,0.10509293446782589,0.024288450266824572,0.06499582700735684,0.25876253126104387,500 -LGBM,LGBM,0.95,0.826,0.41573134306126747,0.1353547312940485,0.982,1.0,0.10509293446782589,0.012452066983782116,0.06499582700735684,0.25876253126104387,500 -LGBM,Logistic Regr.,0.9,0.714,0.34666910502599596,0.13078975736827733,0.964,1.0,0.0981701852821437,0.022246998237972524,0.06545442330246612,0.2589976808508975,500 -LGBM,Logistic Regr.,0.95,0.834,0.41308171698108886,0.13078975736827733,0.984,1.0,0.0981701852821437,0.010949342084431735,0.06545442330246612,0.2589976808508975,500 -Linear Reg.,LGBM,0.9,0.754,0.3496967006881292,0.12455057551341779,0.962,1.0,0.09867724125956995,0.0202175935504151,0.06504946816195568,0.2439341901457105,500 -Linear Reg.,LGBM,0.95,0.858,0.4166893197247619,0.12455057551341779,0.986,1.0,0.09867724125956995,0.009856683129418066,0.06504946816195568,0.2439341901457105,500 -Linear Reg.,Logistic Regr.,0.9,0.948,0.3502540540945954,0.07444772768321123,0.996,1.0,0.05840145836627322,0.004181143741279689,0.09544484272838333,0.17545346180009289,500 -Linear Reg.,Logistic Regr.,0.95,0.976,0.41735344727108903,0.07444772768321123,0.998,1.0,0.05840145836627322,0.0015739249195781788,0.09544484272838333,0.17545346180009289,500 diff --git a/results/irm/irm_atte_sensitivity_config.yml b/results/irm/irm_atte_sensitivity_config.yml new file mode 100644 index 0000000..bf06bc6 --- /dev/null +++ b/results/irm/irm_atte_sensitivity_config.yml @@ -0,0 +1,53 @@ +simulation_parameters: + repetitions: 500 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + theta: + - 5.0 + n_obs: + - 5000 + trimming_threshold: + - 0.05 + var_epsilon_y: + - 1.0 + linear: + - false + gamma_a: + - 0.151 + beta_a: + - 0.582 +learner_definitions: + linear: &id001 + name: Linear + logit: &id002 + name: Logistic + lgbmr: &id003 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + lgbmc: &id004 + name: LGBM Clas. 
+ params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 +dml_parameters: + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id004 + trimming_threshold: + - 0.05 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/irm_atte_sensitivity_coverage.csv b/results/irm/irm_atte_sensitivity_coverage.csv new file mode 100644 index 0000000..075e58b --- /dev/null +++ b/results/irm/irm_atte_sensitivity_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,Coverage (Lower),Coverage (Upper),RV,RVa,Bias (Lower),Bias (Upper),repetition +LGBM Regr.,LGBM Clas.,0.9,0.724,0.3490468150328256,0.1335281439881145,0.95,1.0,0.1038722101661171,0.023169595411329586,0.06489839293328352,0.2564847068835367,500 +LGBM Regr.,LGBM Clas.,0.95,0.848,0.4159149332033151,0.1335281439881145,0.984,1.0,0.1038722101661171,0.011313984709802882,0.06489839293328352,0.2564847068835367,500 +LGBM Regr.,Logistic,0.9,0.712,0.34686964803507303,0.13021401649938852,0.968,1.0,0.09785329627117717,0.02088828773212853,0.06357450466836227,0.25752793576799426,500 +LGBM Regr.,Logistic,0.95,0.862,0.4133206787152529,0.13021401649938852,0.986,1.0,0.09785329627117717,0.010006537111844464,0.06357450466836227,0.25752793576799426,500 +Linear,LGBM Clas.,0.9,0.778,0.34985166701129805,0.12304199369830472,0.968,1.0,0.09780901861894682,0.018644199845226802,0.06344115655765417,0.24215873513324584,500 +Linear,LGBM Clas.,0.95,0.88,0.41687397348802135,0.12304199369830472,0.988,1.0,0.09780901861894682,0.008642793757112277,0.06344115655765417,0.24215873513324584,500 +Linear,Logistic,0.9,0.956,0.35055289955459806,0.0738271575494762,0.996,1.0,0.05790900052274957,0.004015909139992085,0.0975242247284002,0.17368819501662186,500 +Linear,Logistic,0.95,0.98,0.41770954360024026,0.0738271575494762,0.998,1.0,0.05790900052274957,0.0013840310375437,0.0975242247284002,0.17368819501662186,500 diff --git a/results/irm/irm_atte_sensitivity_metadata.csv b/results/irm/irm_atte_sensitivity_metadata.csv index 2fc4ba5..e0ae81f 100644 --- a/results/irm/irm_atte_sensitivity_metadata.csv +++ b/results/irm/irm_atte_sensitivity_metadata.csv @@ -1,2 +1,2 @@ -DoubleML Version,Script,Date,Total Runtime (seconds),Python Version -0.10.dev0,irm_atte_sensitivity.py,2025-05-22 14:47:30,6819.7349536418915,3.12.10 +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,IRMATTESensitivityCoverageSimulation,2025-06-04 10:49,30.47395207484563,3.12.3,scripts/irm/irm_atte_sensitivity_config.yml diff --git a/results/irm/irm_cate_config.yml b/results/irm/irm_cate_config.yml new file mode 100644 index 0000000..c1206fe --- /dev/null +++ b/results/irm/irm_cate_config.yml @@ -0,0 +1,63 @@ +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + n_obs: + - 500 + p: + - 10 + support_size: + - 5 + n_x: + - 1 +learner_definitions: + linear: &id001 + name: Linear + logit: &id002 + name: Logistic + rfr: &id003 + name: RF Regr. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + rfc: &id004 + name: RF Clas. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + lgbmr: &id005 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + lgbmc: &id006 + name: LGBM Clas. 
+ params: + n_estimators: 500 + learning_rate: 0.01 +dml_parameters: + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id001 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id005 + ml_m: *id006 + - ml_g: *id005 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id006 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/irm_cate_coverage.csv b/results/irm/irm_cate_coverage.csv index 788025e..000353f 100644 --- a/results/irm/irm_cate_coverage.csv +++ b/results/irm/irm_cate_coverage.csv @@ -1,9 +1,15 @@ Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition -LGBM,LGBM,0.9,0.9356475000000001,0.6670555194228769,0.1482947174222056,1.0,1.689294724011131,1000 -LGBM,LGBM,0.95,0.9723379999999999,0.7948456764390678,0.1482947174222056,1.0,1.6897571332817831,1000 -LGBM,Logistic Regression,0.9,0.8889914999999999,0.2354452920928757,0.05850699314525499,0.996,0.5957379206984681,1000 -LGBM,Logistic Regression,0.95,0.942037,0.28055036951027373,0.05850699314525499,0.996,0.5990789624833718,1000 -Lasso,LGBM,0.9,0.896289,0.6428742805642967,0.15829333167540963,1.0,1.6320898187101593,1000 -Lasso,LGBM,0.95,0.9491539999999999,0.7660319531461224,0.15829333167540963,1.0,1.6337799284392311,1000 -Lasso,Logistic Regression,0.9,0.8892920000000001,0.24738399667726244,0.061668240816652016,0.998,0.629839636678611,1000 -Lasso,Logistic Regression,0.95,0.9413365,0.29477621345410787,0.061668240816652016,0.997,0.6288417962868703,1000 +LGBM Regr.,LGBM Clas.,0.9,0.92823,1.0570613395560353,0.24271123774219397,1.0,2.6463187180937235,1000 +LGBM Regr.,LGBM Clas.,0.95,0.96976,1.259566274489309,0.24271123774219397,1.0,2.652748380799862,1000 +LGBM Regr.,Logistic,0.9,0.90431,0.4603024759110742,0.10988090751139157,0.996,1.157310107019297,1000 +LGBM Regr.,Logistic,0.95,0.94966,0.5484842298414058,0.10988090751139157,0.998,1.1595510257286086,1000 +Linear,LGBM Clas.,0.9,0.90906,1.0431407600660088,0.25074855426124026,0.999,2.6288585357985186,1000 +Linear,LGBM Clas.,0.95,0.95858,1.2429788809380993,0.25074855426124026,0.998,2.6131478268009114,1000 +Linear,Logistic,0.9,0.9102100000000001,0.4767188958750692,0.11111755516576532,0.999,1.1958150906203837,1000 +Linear,Logistic,0.95,0.95427,0.5680456007484012,0.11111755516576532,0.999,1.1961020063274401,1000 +Linear,RF Clas.,0.9,0.91604,0.5102732447654866,0.11800460653781775,0.999,1.2834578728038433,1000 +Linear,RF Clas.,0.95,0.95914,0.6080280735182108,0.11800460653781775,0.999,1.2816066988048282,1000 +RF Regr.,Logistic,0.9,0.902,0.4598055139749778,0.10994809691322913,0.999,1.1600848822586207,1000 +RF Regr.,Logistic,0.95,0.9499099999999999,0.5478920631704775,0.10994809691322913,0.998,1.1551525914690834,1000 +RF Regr.,RF Clas.,0.9,0.905,0.49735687162215936,0.11853266588343181,0.999,1.2483681652474525,1000 +RF Regr.,RF Clas.,0.95,0.95178,0.5926372656329394,0.11853266588343181,0.999,1.251522831166833,1000 diff --git a/results/irm/irm_cate_metadata.csv b/results/irm/irm_cate_metadata.csv new file mode 100644 index 0000000..1556fdc --- /dev/null +++ b/results/irm/irm_cate_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,IRMCATECoverageSimulation,2025-06-03 14:24,10.22935619354248,3.12.9,scripts/irm/irm_cate_config.yml diff --git a/results/irm/irm_gate_config.yml b/results/irm/irm_gate_config.yml new file mode 100644 index 0000000..c1206fe --- /dev/null +++ b/results/irm/irm_gate_config.yml @@ -0,0 +1,63 @@ +simulation_parameters: + 
repetitions: 1000 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + n_obs: + - 500 + p: + - 10 + support_size: + - 5 + n_x: + - 1 +learner_definitions: + linear: &id001 + name: Linear + logit: &id002 + name: Logistic + rfr: &id003 + name: RF Regr. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + rfc: &id004 + name: RF Clas. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + lgbmr: &id005 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + lgbmc: &id006 + name: LGBM Clas. + params: + n_estimators: 500 + learning_rate: 0.01 +dml_parameters: + learners: + - ml_g: *id001 + ml_m: *id002 + - ml_g: *id003 + ml_m: *id004 + - ml_g: *id001 + ml_m: *id004 + - ml_g: *id003 + ml_m: *id002 + - ml_g: *id005 + ml_m: *id006 + - ml_g: *id005 + ml_m: *id002 + - ml_g: *id001 + ml_m: *id006 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/irm_gate_coverage.csv b/results/irm/irm_gate_coverage.csv index 4208c4b..ab738e0 100644 --- a/results/irm/irm_gate_coverage.csv +++ b/results/irm/irm_gate_coverage.csv @@ -1,9 +1,15 @@ Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition -LGBM,LGBM,0.9,0.941,2.1611526373873855,0.48511146422993706,1.0,5.063646757352436,1000 -LGBM,LGBM,0.95,0.9766666666666667,2.5751722007164304,0.48511146422993706,1.0,5.085406768554818,1000 -LGBM,Logistic Regression,0.9,0.916,0.3904280687581478,0.08854865701618204,0.997,0.9186129412168422,1000 -LGBM,Logistic Regression,0.95,0.9606666666666667,0.4652237383199531,0.08854865701618204,0.998,0.9197763724365766,1000 -Lasso,LGBM,0.9,0.9043333333333333,2.047596498760145,0.49693265710872336,1.0,4.807194790243196,1000 -Lasso,LGBM,0.95,0.959,2.43986171576749,0.49693265710872336,1.0,4.819825928994352,1000 -Lasso,Logistic Regression,0.9,0.9173333333333333,0.400967167206407,0.08997238623406502,0.999,0.9416356317140329,1000 -Lasso,Logistic Regression,0.95,0.9603333333333334,0.4777818486889552,0.08997238623406502,0.999,0.9403831822862944,1000 +LGBM Regr.,LGBM Clas.,0.9,0.9236666666666666,0.851465317065706,0.19722702192668598,1.0,2.011914810456983,1000 +LGBM Regr.,LGBM Clas.,0.95,0.972,1.014583503473648,0.19722702192668598,1.0,2.0031648741801455,1000 +LGBM Regr.,Logistic,0.9,0.9016666666666666,0.40159876476481454,0.09695556310967947,0.999,0.9443179359677094,1000 +LGBM Regr.,Logistic,0.95,0.9473333333333334,0.47853444359887215,0.09695556310967947,0.999,0.9425971024906015,1000 +Linear,LGBM Clas.,0.9,0.923,0.8592796676557775,0.19934007812012308,1.0,2.0230623845143447,1000 +Linear,LGBM Clas.,0.95,0.966,1.023894876515087,0.19934007812012308,1.0,2.0203888246588932,1000 +Linear,Logistic,0.9,0.915,0.4195046438102248,0.09830647800141477,0.999,0.9863092550256803,1000 +Linear,Logistic,0.95,0.9533333333333334,0.49987061446872544,0.09830647800141477,0.999,0.9830466638434444,1000 +Linear,RF Clas.,0.9,0.9206666666666666,0.4437261411849741,0.10126652921113784,0.999,1.0415488911519282,1000 +Linear,RF Clas.,0.95,0.9596666666666667,0.5287323087424741,0.10126652921113784,1.0,1.040804022375608,1000 +RF Regr.,Logistic,0.9,0.897,0.4014791112025455,0.09669425998448095,1.0,0.9423536354277464,1000 +RF Regr.,Logistic,0.95,0.9506666666666667,0.4783918675855257,0.09669425998448095,1.0,0.9449044166570425,1000 +RF Regr.,RF Clas.,0.9,0.8993333333333333,0.4239547202094922,0.10104977503999726,1.0,0.9958597773023945,1000 +RF Regr.,RF 
Clas.,0.95,0.95,0.5051732075554921,0.10104977503999726,0.999,0.9961472336115996,1000 diff --git a/results/irm/irm_gate_metadata.csv b/results/irm/irm_gate_metadata.csv new file mode 100644 index 0000000..13efdf0 --- /dev/null +++ b/results/irm/irm_gate_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,IRMGATECoverageSimulation,2025-06-03 13:48,9.251631820201874,3.12.9,scripts/irm/irm_gate_config.yml diff --git a/results/irm/lpq_Y0_coverage.csv b/results/irm/lpq_Y0_coverage.csv new file mode 100644 index 0000000..81556c0 --- /dev/null +++ b/results/irm/lpq_Y0_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,repetition +LGBM Clas.,LGBM Clas.,0.9,0.935,1.1830884138959616,0.24363577628567012,200 +LGBM Clas.,LGBM Clas.,0.95,0.966,1.4097367958876215,0.24363577628567012,200 +LGBM Clas.,Logistic,0.9,0.9470000000000001,1.1413178140016869,0.21854310075124617,200 +LGBM Clas.,Logistic,0.95,0.971,1.3599640561957957,0.21854310075124617,200 +Logistic,LGBM Clas.,0.9,0.932,1.1519919205445956,0.2330721806385721,200 +Logistic,LGBM Clas.,0.95,0.965,1.3726830386319526,0.2330721806385721,200 +Logistic,Logistic,0.9,0.9359999999999999,1.1121705811298108,0.21974489145697174,200 +Logistic,Logistic,0.95,0.9670000000000001,1.3252329860617573,0.21974489145697174,200 diff --git a/results/irm/lpq_Y1_coverage.csv b/results/irm/lpq_Y1_coverage.csv new file mode 100644 index 0000000..5f9e93b --- /dev/null +++ b/results/irm/lpq_Y1_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,repetition +LGBM Clas.,LGBM Clas.,0.9,0.927,1.6166365116884824,0.31220031423527045,200 +LGBM Clas.,LGBM Clas.,0.95,0.963,1.9263412178957202,0.31220031423527045,200 +LGBM Clas.,Logistic,0.9,0.943,1.563138831397548,0.2947145026308053,200 +LGBM Clas.,Logistic,0.95,0.971,1.8625948000329942,0.2947145026308053,200 +Logistic,LGBM Clas.,0.9,0.9420000000000001,1.567514402933319,0.2888227334203665,200 +Logistic,LGBM Clas.,0.95,0.971,1.867808615099192,0.2888227334203665,200 +Logistic,Logistic,0.9,0.94,1.514063009772447,0.28729535217904684,200 +Logistic,Logistic,0.95,0.9690000000000001,1.8041173517537936,0.28729535217904684,200 diff --git a/results/irm/lpq_config.yml b/results/irm/lpq_config.yml new file mode 100644 index 0000000..85abd3f --- /dev/null +++ b/results/irm/lpq_config.yml @@ -0,0 +1,48 @@ +simulation_parameters: + repetitions: 200 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + n_obs: + - 5000 + dim_x: + - 5 +learner_definitions: + logit: &id001 + name: Logistic + lgbmc: &id002 + name: LGBM Clas. 
+ params: + n_estimators: 200 + learning_rate: 0.05 + num_leaves: 15 + max_depth: 5 + min_child_samples: 10 + subsample: 0.9 + colsample_bytree: 0.9 + reg_alpha: 0.0 + reg_lambda: 0.1 + random_state: 42 +dml_parameters: + tau_vec: + - - 0.3 + - 0.4 + - 0.5 + - 0.6 + - 0.7 + trimming_threshold: + - 0.01 + learners: + - ml_g: *id001 + ml_m: *id001 + - ml_g: *id002 + ml_m: *id002 + - ml_g: *id002 + ml_m: *id001 + - ml_g: *id001 + ml_m: *id002 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/lpq_effect_coverage.csv b/results/irm/lpq_effect_coverage.csv new file mode 100644 index 0000000..b4f076b --- /dev/null +++ b/results/irm/lpq_effect_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition +LGBM Clas.,LGBM Clas.,0.9,0.872,1.6181705726529205,0.40951487126584607,0.845,2.1354108341964264,200 +LGBM Clas.,LGBM Clas.,0.95,0.9159999999999999,1.928169164280202,0.40951487126584607,0.9,2.415503519370637,200 +LGBM Clas.,Logistic,0.9,0.8740000000000001,1.5610121253821851,0.3740351656285603,0.85,2.0636125204752434,200 +LGBM Clas.,Logistic,0.95,0.922,1.860060673514064,0.3740351656285603,0.9,2.3356640837390916,200 +Logistic,LGBM Clas.,0.9,0.88,1.5741042679593236,0.3725851701898653,0.85,2.068634959359497,200 +Logistic,LGBM Clas.,0.95,0.924,1.8756609235978434,0.3725851701898653,0.9,2.3413181297561683,200 +Logistic,Logistic,0.9,0.867,1.517966599775595,0.37223450094097055,0.85,1.9954916237204523,200 +Logistic,Logistic,0.95,0.92,1.808768766135729,0.37223450094097055,0.92,2.260577010587138,200 diff --git a/results/irm/lpq_metadata.csv b/results/irm/lpq_metadata.csv new file mode 100644 index 0000000..1ef72ff --- /dev/null +++ b/results/irm/lpq_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,LPQCoverageSimulation,2025-06-05 13:42,14.852794400850932,3.12.9,scripts/irm/lpq_config.yml diff --git a/results/irm/pq_Y0_coverage.csv b/results/irm/pq_Y0_coverage.csv new file mode 100644 index 0000000..904a016 --- /dev/null +++ b/results/irm/pq_Y0_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,repetition +LGBM Clas.,LGBM Clas.,0.9,0.8721428571428571,0.5855383193027298,0.15439049862379853,200 +LGBM Clas.,LGBM Clas.,0.95,0.932142857142857,0.6977119414135707,0.15439049862379853,200 +LGBM Clas.,Logistic,0.9,0.8607142857142857,0.4157006469047894,0.1135304583367824,200 +LGBM Clas.,Logistic,0.95,0.9264285714285714,0.4953378725822799,0.1135304583367824,200 +Logistic,LGBM Clas.,0.9,0.9007142857142857,0.57746009123863,0.1320561476852036,200 +Logistic,LGBM Clas.,0.95,0.9614285714285714,0.6880861389682303,0.1320561476852036,200 +Logistic,Logistic,0.9,0.8892857142857143,0.40888219424763206,0.10270056266342995,200 +Logistic,Logistic,0.95,0.9335714285714286,0.4872131851211298,0.10270056266342995,200 diff --git a/results/irm/pq_Y1_coverage.csv b/results/irm/pq_Y1_coverage.csv new file mode 100644 index 0000000..59ce777 --- /dev/null +++ b/results/irm/pq_Y1_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,repetition +LGBM Clas.,LGBM Clas.,0.9,0.9135714285714286,0.2547786121622827,0.06025439826660347,200 +LGBM Clas.,LGBM Clas.,0.95,0.9635714285714286,0.30358744126957204,0.06025439826660347,200 +LGBM Clas.,Logistic,0.9,0.925,0.23468112272452962,0.055254012231200356,200 +LGBM Clas.,Logistic,0.95,0.9621428571428571,0.27963980554548934,0.055254012231200356,200 +Logistic,LGBM 
Clas.,0.9,0.9292857142857143,0.25306018928392865,0.05756351160374336,200 +Logistic,LGBM Clas.,0.95,0.9728571428571429,0.30153981411503566,0.05756351160374336,200 +Logistic,Logistic,0.9,0.9235714285714286,0.23576437773394143,0.05403824556407575,200 +Logistic,Logistic,0.95,0.9721428571428571,0.2809305835027078,0.05403824556407575,200 diff --git a/results/irm/pq_config.yml b/results/irm/pq_config.yml new file mode 100644 index 0000000..e106878 --- /dev/null +++ b/results/irm/pq_config.yml @@ -0,0 +1,50 @@ +simulation_parameters: + repetitions: 200 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + n_obs: + - 5000 + dim_x: + - 5 +learner_definitions: + logit: &id001 + name: Logistic + lgbmc: &id002 + name: LGBM Clas. + params: + n_estimators: 200 + learning_rate: 0.05 + num_leaves: 15 + max_depth: 5 + min_child_samples: 10 + subsample: 0.9 + colsample_bytree: 0.9 + reg_alpha: 0.0 + reg_lambda: 0.1 + random_state: 42 +dml_parameters: + tau_vec: + - - 0.2 + - 0.3 + - 0.4 + - 0.5 + - 0.6 + - 0.7 + - 0.8 + trimming_threshold: + - 0.01 + learners: + - ml_g: *id001 + ml_m: *id001 + - ml_g: *id002 + ml_m: *id002 + - ml_g: *id002 + ml_m: *id001 + - ml_g: *id001 + ml_m: *id002 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/irm/pq_effect_coverage.csv b/results/irm/pq_effect_coverage.csv new file mode 100644 index 0000000..2723ea4 --- /dev/null +++ b/results/irm/pq_effect_coverage.csv @@ -0,0 +1,9 @@ +Learner g,Learner m,level,Coverage,CI Length,Bias,Uniform Coverage,Uniform CI Length,repetition +LGBM Clas.,LGBM Clas.,0.9,0.8721428571428571,0.6250811142351042,0.16391196246394876,0.83,0.8938663559093165,200 +LGBM Clas.,LGBM Clas.,0.95,0.9328571428571428,0.74483008776143,0.16391196246394876,0.91,0.9951285879711892,200 +LGBM Clas.,Logistic,0.9,0.8664285714285714,0.4563475824127607,0.1242769519340373,0.825,0.6552349151667995,200 +LGBM Clas.,Logistic,0.95,0.927142857142857,0.5437716835744456,0.1242769519340373,0.905,0.7290641682104055,200 +Logistic,LGBM Clas.,0.9,0.92,0.6192520613043571,0.14050842467602845,0.905,0.873815817039262,200 +Logistic,LGBM Clas.,0.95,0.9585714285714286,0.7378843427899359,0.14050842467602845,0.94,0.97537292218002,200 +Logistic,Logistic,0.9,0.8907142857142857,0.4547624845968767,0.11573164990154354,0.88,0.6436547419121843,200 +Logistic,Logistic,0.95,0.9385714285714286,0.541882923030528,0.11573164990154354,0.94,0.7176332662822544,200 diff --git a/results/irm/pq_metadata.csv b/results/irm/pq_metadata.csv new file mode 100644 index 0000000..ccb611e --- /dev/null +++ b/results/irm/pq_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,PQCoverageSimulation,2025-06-05 12:51,16.240616850058238,3.12.9,scripts/irm/pq_config.yml diff --git a/results/ssm/ssm_mar_ate_config.yml b/results/ssm/ssm_mar_ate_config.yml new file mode 100644 index 0000000..6c5f926 --- /dev/null +++ b/results/ssm/ssm_mar_ate_config.yml @@ -0,0 +1,74 @@ +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + theta: + - 1.0 + n_obs: + - 500 + dim_x: + - 20 +learner_definitions: + lasso: &id001 + name: LassoCV + logit: &id002 + name: Logistic + rfr: &id003 + name: RF Regr. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + rfc: &id004 + name: RF Clas. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + lgbmr: &id005 + name: LGBM Regr. 
+ params: + n_estimators: 500 + learning_rate: 0.01 + lgbmc: &id006 + name: LGBM Clas. + params: + n_estimators: 500 + learning_rate: 0.01 +dml_parameters: + learners: + - ml_g: *id001 + ml_m: *id002 + ml_pi: *id002 + - ml_g: *id003 + ml_m: *id004 + ml_pi: *id004 + - ml_g: *id001 + ml_m: *id004 + ml_pi: *id004 + - ml_g: *id003 + ml_m: *id002 + ml_pi: *id004 + - ml_g: *id003 + ml_m: *id004 + ml_pi: *id002 + - ml_g: *id005 + ml_m: *id006 + ml_pi: *id006 + - ml_g: *id001 + ml_m: *id006 + ml_pi: *id006 + - ml_g: *id005 + ml_m: *id002 + ml_pi: *id006 + - ml_g: *id005 + ml_m: *id006 + ml_pi: *id002 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/ssm/ssm_mar_ate_coverage.csv b/results/ssm/ssm_mar_ate_coverage.csv new file mode 100644 index 0000000..91f8e89 --- /dev/null +++ b/results/ssm/ssm_mar_ate_coverage.csv @@ -0,0 +1,19 @@ +Learner g,Learner m,Learner pi,level,Coverage,CI Length,Bias,repetition +LGBM Regr.,LGBM Clas.,LGBM Clas.,0.9,0.937,1.1020474646866227,0.25225717557270916,1000 +LGBM Regr.,LGBM Clas.,LGBM Clas.,0.95,0.976,1.3131705488242713,0.25225717557270916,1000 +LGBM Regr.,LGBM Clas.,Logistic,0.9,0.925,0.9396009375831231,0.21523318689504592,1000 +LGBM Regr.,LGBM Clas.,Logistic,0.95,0.967,1.1196035728213285,0.21523318689504592,1000 +LGBM Regr.,Logistic,LGBM Clas.,0.9,0.927,0.7652547530061172,0.16892900789678272,1000 +LGBM Regr.,Logistic,LGBM Clas.,0.95,0.976,0.9118572803769212,0.16892900789678272,1000 +LassoCV,LGBM Clas.,LGBM Clas.,0.9,0.94,1.0416524703569756,0.23136599807480204,1000 +LassoCV,LGBM Clas.,LGBM Clas.,0.95,0.979,1.2412054743683776,0.23136599807480204,1000 +LassoCV,Logistic,Logistic,0.9,0.918,0.5983979591318811,0.13819869402679225,1000 +LassoCV,Logistic,Logistic,0.95,0.961,0.7130351473854032,0.13819869402679225,1000 +LassoCV,RF Clas.,RF Clas.,0.9,0.915,0.5159726010062256,0.11685996635828219,1000 +LassoCV,RF Clas.,RF Clas.,0.95,0.966,0.6148192753515406,0.11685996635828219,1000 +RF Regr.,Logistic,RF Clas.,0.9,0.914,0.5740315353572116,0.13374033079721487,1000 +RF Regr.,Logistic,RF Clas.,0.95,0.967,0.6840007626548271,0.13374033079721487,1000 +RF Regr.,RF Clas.,Logistic,0.9,0.916,0.5717174746911912,0.13251444813486235,1000 +RF Regr.,RF Clas.,Logistic,0.95,0.964,0.6812433893000642,0.13251444813486235,1000 +RF Regr.,RF Clas.,RF Clas.,0.9,0.917,0.5204895028483878,0.12032384089569902,1000 +RF Regr.,RF Clas.,RF Clas.,0.95,0.957,0.6202014958648324,0.12032384089569902,1000 diff --git a/results/ssm/ssm_mar_ate_metadata.csv b/results/ssm/ssm_mar_ate_metadata.csv new file mode 100644 index 0000000..b21daf8 --- /dev/null +++ b/results/ssm/ssm_mar_ate_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,SSMMarATECoverageSimulation,2025-06-05 10:19,32.703589328130086,3.12.9,scripts/ssm/ssm_mar_ate_config.yml diff --git a/results/ssm/ssm_nonig_ate_config.yml b/results/ssm/ssm_nonig_ate_config.yml new file mode 100644 index 0000000..6c5f926 --- /dev/null +++ b/results/ssm/ssm_nonig_ate_config.yml @@ -0,0 +1,74 @@ +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 + random_seed: 42 + n_jobs: -2 +dgp_parameters: + theta: + - 1.0 + n_obs: + - 500 + dim_x: + - 20 +learner_definitions: + lasso: &id001 + name: LassoCV + logit: &id002 + name: Logistic + rfr: &id003 + name: RF Regr. + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + rfc: &id004 + name: RF Clas. 
+ params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + lgbmr: &id005 + name: LGBM Regr. + params: + n_estimators: 500 + learning_rate: 0.01 + lgbmc: &id006 + name: LGBM Clas. + params: + n_estimators: 500 + learning_rate: 0.01 +dml_parameters: + learners: + - ml_g: *id001 + ml_m: *id002 + ml_pi: *id002 + - ml_g: *id003 + ml_m: *id004 + ml_pi: *id004 + - ml_g: *id001 + ml_m: *id004 + ml_pi: *id004 + - ml_g: *id003 + ml_m: *id002 + ml_pi: *id004 + - ml_g: *id003 + ml_m: *id004 + ml_pi: *id002 + - ml_g: *id005 + ml_m: *id006 + ml_pi: *id006 + - ml_g: *id001 + ml_m: *id006 + ml_pi: *id006 + - ml_g: *id005 + ml_m: *id002 + ml_pi: *id006 + - ml_g: *id005 + ml_m: *id006 + ml_pi: *id002 +confidence_parameters: + level: + - 0.95 + - 0.9 diff --git a/results/ssm/ssm_nonig_ate_coverage.csv b/results/ssm/ssm_nonig_ate_coverage.csv new file mode 100644 index 0000000..3fe8260 --- /dev/null +++ b/results/ssm/ssm_nonig_ate_coverage.csv @@ -0,0 +1,19 @@ +Learner g,Learner m,Learner pi,level,Coverage,CI Length,Bias,repetition +LGBM Regr.,LGBM Clas.,LGBM Clas.,0.9,0.906,1.5821140101993176,0.39941487645605,1000 +LGBM Regr.,LGBM Clas.,LGBM Clas.,0.95,0.958,1.8852051201504163,0.39941487645605,1000 +LGBM Regr.,LGBM Clas.,Logistic,0.9,0.935,2.3216975230755064,0.6217489058523816,1000 +LGBM Regr.,LGBM Clas.,Logistic,0.95,0.974,2.7664732312123816,0.6217489058523816,1000 +LGBM Regr.,Logistic,LGBM Clas.,0.9,0.834,1.1194612001997861,0.31096250226975036,1000 +LGBM Regr.,Logistic,LGBM Clas.,0.95,0.904,1.3339202945055102,0.31096250226975036,1000 +LassoCV,LGBM Clas.,LGBM Clas.,0.9,0.907,1.4773761863865273,0.3819267464776803,1000 +LassoCV,LGBM Clas.,LGBM Clas.,0.95,0.957,1.760402305402313,0.3819267464776803,1000 +LassoCV,Logistic,Logistic,0.9,0.857,1.8486984724368598,0.524196741214989,1000 +LassoCV,Logistic,Logistic,0.95,0.92,2.202860099452095,0.524196741214989,1000 +LassoCV,RF Clas.,RF Clas.,0.9,0.78,0.6505940752158618,0.20476023599261262,1000 +LassoCV,RF Clas.,RF Clas.,0.95,0.872,0.7752306558374706,0.20476023599261262,1000 +RF Regr.,Logistic,RF Clas.,0.9,0.708,0.7687129167631139,0.2667490928514026,1000 +RF Regr.,Logistic,RF Clas.,0.95,0.814,0.9159779366500982,0.2667490928514026,1000 +RF Regr.,RF Clas.,Logistic,0.9,0.892,1.437280071189879,0.396115809059242,1000 +RF Regr.,RF Clas.,Logistic,0.95,0.949,1.7126248372934632,0.396115809059242,1000 +RF Regr.,RF Clas.,RF Clas.,0.9,0.779,0.6653233603546214,0.20524894117905498,1000 +RF Regr.,RF Clas.,RF Clas.,0.95,0.857,0.7927816816047267,0.20524894117905498,1000 diff --git a/results/ssm/ssm_nonig_ate_metadata.csv b/results/ssm/ssm_nonig_ate_metadata.csv new file mode 100644 index 0000000..f8e47b8 --- /dev/null +++ b/results/ssm/ssm_nonig_ate_metadata.csv @@ -0,0 +1,2 @@ +DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File +0.10.0,SSMNonIgnorableATECoverageSimulation,2025-06-05 10:40,19.940552759170533,3.12.9,scripts/ssm/ssm_nonig_ate_config.yml diff --git a/scripts/irm/apo.py b/scripts/irm/apo.py new file mode 100644 index 0000000..2821336 --- /dev/null +++ b/scripts/irm/apo.py @@ -0,0 +1,13 @@ +from montecover.irm import APOCoverageSimulation + +# Create and run simulation with config file +sim = APOCoverageSimulation( + config_file="scripts/irm/apo_config.yml", + log_level="INFO", + log_file="logs/irm/apo_sim.log", +) +sim.run_simulation() +sim.save_results(output_path="results/irm/", file_prefix="apo") + +# Save config file for reproducibility +sim.save_config("results/irm/apo_config.yml") diff --git 
a/scripts/irm/apo_config.yml b/scripts/irm/apo_config.yml new file mode 100644 index 0000000..511907a --- /dev/null +++ b/scripts/irm/apo_config.yml @@ -0,0 +1,51 @@ +# Simulation parameters for APO Coverage + +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 # 5.5 hours in seconds + random_seed: 42 + n_jobs: -2 + +dgp_parameters: + n_obs: [500] # Sample size + n_levels: [2] + linear: [True] + +# Define reusable learner configurations +learner_definitions: + linear: &linear + name: "Linear" + + logit: &logit + name: "Logistic" + + lgbmr: &lgbmr + name: "LGBM Regr." + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + + lgbmc: &lgbmc + name: "LGBM Clas." + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + +dml_parameters: + treatment_level: [0, 1, 2] + trimming_threshold: [0.01] + learners: + - ml_g: *linear + ml_m: *logit + - ml_g: *lgbmr + ml_m: *lgbmc + - ml_g: *lgbmr + ml_m: *logit + - ml_g: *linear + ml_m: *lgbmc + + +confidence_parameters: + level: [0.95, 0.90] # Confidence levels diff --git a/scripts/irm/apos.py b/scripts/irm/apos.py new file mode 100644 index 0000000..e26657c --- /dev/null +++ b/scripts/irm/apos.py @@ -0,0 +1,13 @@ +from montecover.irm import APOSCoverageSimulation + +# Create and run simulation with config file +sim = APOSCoverageSimulation( + config_file="scripts/irm/apos_config.yml", + log_level="INFO", + log_file="logs/irm/apos_sim.log", +) +sim.run_simulation() +sim.save_results(output_path="results/irm/", file_prefix="apos") + +# Save config file for reproducibility +sim.save_config("results/irm/apos_config.yml") diff --git a/scripts/irm/apos_config.yml b/scripts/irm/apos_config.yml new file mode 100644 index 0000000..e7102c5 --- /dev/null +++ b/scripts/irm/apos_config.yml @@ -0,0 +1,51 @@ +# Simulation parameters for APOS Coverage + +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 # 5.5 hours in seconds + random_seed: 42 + n_jobs: -2 + +dgp_parameters: + n_obs: [500] # Sample size + n_levels: [2] + linear: [True] + +# Define reusable learner configurations +learner_definitions: + linear: &linear + name: "Linear" + + logit: &logit + name: "Logistic" + + lgbmr: &lgbmr + name: "LGBM Regr." + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + + lgbmc: &lgbmc + name: "LGBM Clas." 
+ params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + +dml_parameters: + treatment_levels: [[0, 1, 2]] + trimming_threshold: [0.01] + learners: + - ml_g: *linear + ml_m: *logit + - ml_g: *lgbmr + ml_m: *lgbmc + - ml_g: *lgbmr + ml_m: *logit + - ml_g: *linear + ml_m: *lgbmc + + +confidence_parameters: + level: [0.95, 0.90] # Confidence levels diff --git a/scripts/irm/cvar.py b/scripts/irm/cvar.py new file mode 100644 index 0000000..2195294 --- /dev/null +++ b/scripts/irm/cvar.py @@ -0,0 +1,13 @@ +from montecover.irm import CVARCoverageSimulation + +# Create and run simulation with config file +sim = CVARCoverageSimulation( + config_file="scripts/irm/cvar_config.yml", + log_level="INFO", + log_file="logs/irm/cvar_sim.log", +) +sim.run_simulation() +sim.save_results(output_path="results/irm/", file_prefix="cvar") + +# Save config file for reproducibility +sim.save_config("results/irm/cvar_config.yml") diff --git a/scripts/irm/cvar_config.yml b/scripts/irm/cvar_config.yml new file mode 100644 index 0000000..f3531b4 --- /dev/null +++ b/scripts/irm/cvar_config.yml @@ -0,0 +1,63 @@ +# Simulation parameters for CVAR Coverage + +simulation_parameters: + repetitions: 200 + max_runtime: 19800 # 5.5 hours in seconds + random_seed: 42 + n_jobs: -2 + +dgp_parameters: + n_obs: [5000] # Sample size + dim_x: [5] # Number of covariates + +# Define reusable learner configurations +learner_definitions: + linear: &linear + name: "Linear" + + logit: &logit + name: "Logistic" + + lgbmr: &lgbmr + name: "LGBM Regr." + params: + n_estimators: 200 # Fewer trees — faster + learning_rate: 0.05 # Balanced speed and stability + num_leaves: 15 # Modest complexity for smaller data + max_depth: 5 # Limit tree depth to avoid overfitting + min_child_samples: 10 # Minimum samples per leaf — conservative + subsample: 0.9 # Slightly randomized rows + colsample_bytree: 0.9 # Slightly randomized features + reg_alpha: 0.0 # No L1 regularization (faster) + reg_lambda: 0.1 # Light L2 regularization + random_state: 42 # Reproducible + + lgbmc: &lgbmc + name: "LGBM Clas." 
+ params: + n_estimators: 200 # Fewer trees — faster + learning_rate: 0.05 # Balanced speed and stability + num_leaves: 15 # Modest complexity for smaller data + max_depth: 5 # Limit tree depth to avoid overfitting + min_child_samples: 10 # Minimum samples per leaf — conservative + subsample: 0.9 # Slightly randomized rows + colsample_bytree: 0.9 # Slightly randomized features + reg_alpha: 0.0 # No L1 regularization (faster) + reg_lambda: 0.1 # Light L2 regularization + random_state: 42 # Reproducible + +dml_parameters: + tau_vec: [[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]] # Quantiles + trimming_threshold: [0.01] + learners: + - ml_g: *linear + ml_m: *logit + - ml_g: *lgbmr + ml_m: *lgbmc + - ml_g: *lgbmr + ml_m: *logit + - ml_g: *linear + ml_m: *lgbmc + +confidence_parameters: + level: [0.95, 0.90] # Confidence levels diff --git a/scripts/irm/irm_ate.py b/scripts/irm/irm_ate.py new file mode 100644 index 0000000..7b127bd --- /dev/null +++ b/scripts/irm/irm_ate.py @@ -0,0 +1,13 @@ +from montecover.irm import IRMATECoverageSimulation + +# Create and run simulation with config file +sim = IRMATECoverageSimulation( + config_file="scripts/irm/irm_ate_config.yml", + log_level="INFO", + log_file="logs/irm/irm_ate_sim.log", +) +sim.run_simulation() +sim.save_results(output_path="results/irm/", file_prefix="irm_ate") + +# Save config file for reproducibility +sim.save_config("results/irm/irm_ate_config.yml") diff --git a/scripts/irm/irm_ate_config.yml b/scripts/irm/irm_ate_config.yml new file mode 100644 index 0000000..6a7a3f5 --- /dev/null +++ b/scripts/irm/irm_ate_config.yml @@ -0,0 +1,68 @@ +# Simulation parameters for IRM ATE Coverage + +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 # 5.5 hours in seconds + random_seed: 42 + n_jobs: -2 + +dgp_parameters: + theta: [0.5] # Treatment effect + n_obs: [500] # Sample size + dim_x: [20] # Number of covariates + +# Define reusable learner configurations +learner_definitions: + lasso: &lasso + name: "LassoCV" + + logit: &logit + name: "Logistic" + + rfr: &rfr + name: "RF Regr." + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + + rfc: &rfc + name: "RF Clas." + params: + n_estimators: 200 + max_features: 20 + max_depth: 5 + min_samples_leaf: 2 + + lgbmr: &lgbmr + name: "LGBM Regr." + params: + n_estimators: 500 + learning_rate: 0.01 + + lgbmc: &lgbmc + name: "LGBM Clas." 
+ params: + n_estimators: 500 + learning_rate: 0.01 + +dml_parameters: + learners: + - ml_g: *lasso + ml_m: *logit + - ml_g: *rfr + ml_m: *rfc + - ml_g: *lasso + ml_m: *rfc + - ml_g: *rfr + ml_m: *logit + - ml_g: *lgbmr + ml_m: *lgbmc + - ml_g: *lgbmr + ml_m: *logit + - ml_g: *lasso + ml_m: *lgbmc + +confidence_parameters: + level: [0.95, 0.90] # Confidence levels diff --git a/scripts/irm/irm_ate_sensitivity.py b/scripts/irm/irm_ate_sensitivity.py index b53600c..d3651b6 100644 --- a/scripts/irm/irm_ate_sensitivity.py +++ b/scripts/irm/irm_ate_sensitivity.py @@ -1,198 +1,13 @@ -import numpy as np -import pandas as pd -from datetime import datetime -import time -import sys +from montecover.irm import IRMATESensitivityCoverageSimulation -from sklearn.linear_model import LinearRegression, LogisticRegression -from lightgbm import LGBMRegressor, LGBMClassifier - -import doubleml as dml -from doubleml.datasets import make_confounded_irm_data - -# Number of repetitions -n_rep = 500 -max_runtime = 5.5 * 3600 # 5.5 hours in seconds - -# DGP pars -n_obs = 5000 -theta = 5.0 -trimming_threshold = 0.05 - -dgp_pars = { - "gamma_a": 0.198, - "beta_a": 0.582, - "theta": theta, - "var_epsilon_y": 1.0, - "trimming_threshold": trimming_threshold, - "linear": False, -} - -# test inputs -np.random.seed(42) -dgp_dict = make_confounded_irm_data(n_obs=int(1e6), **dgp_pars) - -oracle_dict = dgp_dict["oracle_values"] -rho = oracle_dict["rho_ate"] -cf_y = oracle_dict["cf_y"] -cf_d = oracle_dict["cf_d_ate"] - -print(f"Confounding factor for Y: {cf_y}") -print(f"Confounding factor for D: {cf_d}") -print(f"Rho: {rho}") - -# to get the best possible comparison between different learners (and settings) we first simulate all datasets -np.random.seed(42) -datasets = [] -for i in range(n_rep): - dgp_dict = make_confounded_irm_data(n_obs=n_obs, **dgp_pars) - datasets.append(dgp_dict) - -# set up hyperparameters -hyperparam_dict = { - "learner_g": [ - ("Linear Reg.", LinearRegression()), - ( - "LGBM", - LGBMRegressor( - n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1 - ), - ), - ], - "learner_m": [ - ("Logistic Regr.", LogisticRegression()), - ( - "LGBM", - LGBMClassifier( - n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1 - ), - ), - ], - "level": [0.95, 0.90], -} - -# set up the results dataframe -df_results_detailed = pd.DataFrame() - -# start simulation -np.random.seed(42) -start_time = time.time() - -for i_rep in range(n_rep): - print(f"Repetition: {i_rep}/{n_rep}", end="\r") - - # Check the elapsed time - elapsed_time = time.time() - start_time - if elapsed_time > max_runtime: - print("Maximum runtime exceeded. 
Stopping the simulation.") - break - - # define the DoubleML data object - dgp_dict = datasets[i_rep] - - x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])] - df = pd.DataFrame( - np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])), - columns=x_cols + ["y", "d"], - ) - obj_dml_data = dml.DoubleMLData(df, "y", "d") - - for learner_g_idx, (learner_g_name, ml_g) in enumerate( - hyperparam_dict["learner_g"] - ): - for learner_m_idx, (learner_m_name, ml_m) in enumerate( - hyperparam_dict["learner_m"] - ): - # Set machine learning methods for g & m - dml_irm = dml.DoubleMLIRM( - obj_dml_data=obj_dml_data, - ml_g=ml_g, - ml_m=ml_m, - trimming_threshold=trimming_threshold, - ) - dml_irm.fit(n_jobs_cv=5) - - for level_idx, level in enumerate(hyperparam_dict["level"]): - estimate = dml_irm.coef[0] - confint = dml_irm.confint(level=level) - coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1]) - ci_length = confint.iloc[0, 1] - confint.iloc[0, 0] - - # test sensitivity parameters - dml_irm.sensitivity_analysis( - cf_y=cf_y, cf_d=cf_d, rho=rho, level=level, null_hypothesis=theta - ) - cover_lower = theta >= dml_irm.sensitivity_params["ci"]["lower"] - cover_upper = theta <= dml_irm.sensitivity_params["ci"]["upper"] - rv = dml_irm.sensitivity_params["rv"] - rva = dml_irm.sensitivity_params["rva"] - bias_lower = abs(theta - dml_irm.sensitivity_params["theta"]["lower"]) - bias_upper = abs(theta - dml_irm.sensitivity_params["theta"]["upper"]) - - df_results_detailed = pd.concat( - ( - df_results_detailed, - pd.DataFrame( - { - "Coverage": coverage.astype(int), - "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0], - "Bias": abs(estimate - theta), - "Coverage (Lower)": cover_lower.astype(int), - "Coverage (Upper)": cover_upper.astype(int), - "RV": rv, - "RVa": rva, - "Bias (Lower)": bias_lower, - "Bias (Upper)": bias_upper, - "Learner g": learner_g_name, - "Learner m": learner_m_name, - "level": level, - "repetition": i_rep, - }, - index=[0], - ), - ), - ignore_index=True, - ) - -df_results = ( - df_results_detailed.groupby(["Learner g", "Learner m", "level"]) - .agg( - { - "Coverage": "mean", - "CI Length": "mean", - "Bias": "mean", - "Coverage (Lower)": "mean", - "Coverage (Upper)": "mean", - "RV": "mean", - "RVa": "mean", - "Bias (Lower)": "mean", - "Bias (Upper)": "mean", - "repetition": "count", - } - ) - .reset_index() -) -print(df_results) - -end_time = time.time() -total_runtime = end_time - start_time - -# save results -script_name = "irm_ate_sensitivity.py" -path = "results/irm/irm_ate_sensitivity" - -metadata = pd.DataFrame( - { - "DoubleML Version": [dml.__version__], - "Script": [script_name], - "Date": [datetime.now().strftime("%Y-%m-%d %H:%M:%S")], - "Total Runtime (seconds)": [total_runtime], - "Python Version": [ - f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}" - ], - } +# Create and run simulation with config file +sim = IRMATESensitivityCoverageSimulation( + config_file="scripts/irm/irm_ate_sensitivity_config.yml", + log_level="INFO", + log_file="logs/irm/irm_ate_sensitivity_sim.log", ) -print(metadata) +sim.run_simulation() +sim.save_results(output_path="results/irm/", file_prefix="irm_ate_sensitivity") -df_results.to_csv(f"{path}.csv", index=False) -metadata.to_csv(f"{path}_metadata.csv", index=False) +# Save config file for reproducibility +sim.save_config("results/irm/irm_ate_sensitivity_config.yml") diff --git a/scripts/irm/irm_ate_sensitivity_config.yml b/scripts/irm/irm_ate_sensitivity_config.yml new file 
mode 100644 index 0000000..c051ac0 --- /dev/null +++ b/scripts/irm/irm_ate_sensitivity_config.yml @@ -0,0 +1,54 @@ +# Simulation parameters for IRM ATE Sensitivity Coverage + +simulation_parameters: + repetitions: 500 + max_runtime: 19800 # 5.5 hours in seconds + random_seed: 42 + n_jobs: -2 + +dgp_parameters: + theta: [5.0] # Treatment effect + n_obs: [5000] # Sample size + trimming_threshold: [0.05] # Trimming threshold + var_epsilon_y: [1.0] # Variance of outcome noise + linear: [False] + gamma_a: [0.198] + beta_a: [0.582] + +# Define reusable learner configurations +learner_definitions: + linear: &linear + name: "Linear" + + logit: &logit + name: "Logistic" + + lgbmr: &lgbmr + name: "LGBM Regr." + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + + lgbmc: &lgbmc + name: "LGBM Clas." + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + +dml_parameters: + learners: + - ml_g: *linear + ml_m: *logit + - ml_g: *lgbmr + ml_m: *lgbmc + - ml_g: *lgbmr + ml_m: *logit + - ml_g: *linear + ml_m: *lgbmc + + trimming_threshold: [0.05] + +confidence_parameters: + level: [0.95, 0.90] # Confidence levels diff --git a/scripts/irm/irm_ate_sensitivity_old.py b/scripts/irm/irm_ate_sensitivity_old.py new file mode 100644 index 0000000..b53600c --- /dev/null +++ b/scripts/irm/irm_ate_sensitivity_old.py @@ -0,0 +1,198 @@ +import numpy as np +import pandas as pd +from datetime import datetime +import time +import sys + +from sklearn.linear_model import LinearRegression, LogisticRegression +from lightgbm import LGBMRegressor, LGBMClassifier + +import doubleml as dml +from doubleml.datasets import make_confounded_irm_data + +# Number of repetitions +n_rep = 500 +max_runtime = 5.5 * 3600 # 5.5 hours in seconds + +# DGP pars +n_obs = 5000 +theta = 5.0 +trimming_threshold = 0.05 + +dgp_pars = { + "gamma_a": 0.198, + "beta_a": 0.582, + "theta": theta, + "var_epsilon_y": 1.0, + "trimming_threshold": trimming_threshold, + "linear": False, +} + +# test inputs +np.random.seed(42) +dgp_dict = make_confounded_irm_data(n_obs=int(1e6), **dgp_pars) + +oracle_dict = dgp_dict["oracle_values"] +rho = oracle_dict["rho_ate"] +cf_y = oracle_dict["cf_y"] +cf_d = oracle_dict["cf_d_ate"] + +print(f"Confounding factor for Y: {cf_y}") +print(f"Confounding factor for D: {cf_d}") +print(f"Rho: {rho}") + +# to get the best possible comparison between different learners (and settings) we first simulate all datasets +np.random.seed(42) +datasets = [] +for i in range(n_rep): + dgp_dict = make_confounded_irm_data(n_obs=n_obs, **dgp_pars) + datasets.append(dgp_dict) + +# set up hyperparameters +hyperparam_dict = { + "learner_g": [ + ("Linear Reg.", LinearRegression()), + ( + "LGBM", + LGBMRegressor( + n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1 + ), + ), + ], + "learner_m": [ + ("Logistic Regr.", LogisticRegression()), + ( + "LGBM", + LGBMClassifier( + n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1 + ), + ), + ], + "level": [0.95, 0.90], +} + +# set up the results dataframe +df_results_detailed = pd.DataFrame() + +# start simulation +np.random.seed(42) +start_time = time.time() + +for i_rep in range(n_rep): + print(f"Repetition: {i_rep}/{n_rep}", end="\r") + + # Check the elapsed time + elapsed_time = time.time() - start_time + if elapsed_time > max_runtime: + print("Maximum runtime exceeded. 
Stopping the simulation.") + break + + # define the DoubleML data object + dgp_dict = datasets[i_rep] + + x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])] + df = pd.DataFrame( + np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])), + columns=x_cols + ["y", "d"], + ) + obj_dml_data = dml.DoubleMLData(df, "y", "d") + + for learner_g_idx, (learner_g_name, ml_g) in enumerate( + hyperparam_dict["learner_g"] + ): + for learner_m_idx, (learner_m_name, ml_m) in enumerate( + hyperparam_dict["learner_m"] + ): + # Set machine learning methods for g & m + dml_irm = dml.DoubleMLIRM( + obj_dml_data=obj_dml_data, + ml_g=ml_g, + ml_m=ml_m, + trimming_threshold=trimming_threshold, + ) + dml_irm.fit(n_jobs_cv=5) + + for level_idx, level in enumerate(hyperparam_dict["level"]): + estimate = dml_irm.coef[0] + confint = dml_irm.confint(level=level) + coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1]) + ci_length = confint.iloc[0, 1] - confint.iloc[0, 0] + + # test sensitivity parameters + dml_irm.sensitivity_analysis( + cf_y=cf_y, cf_d=cf_d, rho=rho, level=level, null_hypothesis=theta + ) + cover_lower = theta >= dml_irm.sensitivity_params["ci"]["lower"] + cover_upper = theta <= dml_irm.sensitivity_params["ci"]["upper"] + rv = dml_irm.sensitivity_params["rv"] + rva = dml_irm.sensitivity_params["rva"] + bias_lower = abs(theta - dml_irm.sensitivity_params["theta"]["lower"]) + bias_upper = abs(theta - dml_irm.sensitivity_params["theta"]["upper"]) + + df_results_detailed = pd.concat( + ( + df_results_detailed, + pd.DataFrame( + { + "Coverage": coverage.astype(int), + "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0], + "Bias": abs(estimate - theta), + "Coverage (Lower)": cover_lower.astype(int), + "Coverage (Upper)": cover_upper.astype(int), + "RV": rv, + "RVa": rva, + "Bias (Lower)": bias_lower, + "Bias (Upper)": bias_upper, + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + "repetition": i_rep, + }, + index=[0], + ), + ), + ignore_index=True, + ) + +df_results = ( + df_results_detailed.groupby(["Learner g", "Learner m", "level"]) + .agg( + { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Coverage (Lower)": "mean", + "Coverage (Upper)": "mean", + "RV": "mean", + "RVa": "mean", + "Bias (Lower)": "mean", + "Bias (Upper)": "mean", + "repetition": "count", + } + ) + .reset_index() +) +print(df_results) + +end_time = time.time() +total_runtime = end_time - start_time + +# save results +script_name = "irm_ate_sensitivity.py" +path = "results/irm/irm_ate_sensitivity" + +metadata = pd.DataFrame( + { + "DoubleML Version": [dml.__version__], + "Script": [script_name], + "Date": [datetime.now().strftime("%Y-%m-%d %H:%M:%S")], + "Total Runtime (seconds)": [total_runtime], + "Python Version": [ + f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}" + ], + } +) +print(metadata) + +df_results.to_csv(f"{path}.csv", index=False) +metadata.to_csv(f"{path}_metadata.csv", index=False) diff --git a/scripts/irm/irm_atte.py b/scripts/irm/irm_atte.py new file mode 100644 index 0000000..829ba0e --- /dev/null +++ b/scripts/irm/irm_atte.py @@ -0,0 +1,13 @@ +from montecover.irm import IRMATTECoverageSimulation + +# Create and run simulation with config file +sim = IRMATTECoverageSimulation( + config_file="scripts/irm/irm_atte_config.yml", + log_level="INFO", + log_file="logs/irm/irm_atte_sim.log", +) +sim.run_simulation() +sim.save_results(output_path="results/irm/", file_prefix="irm_atte") + +# Save config file for 
reproducibility +sim.save_config("results/irm/irm_atte_config.yml") diff --git a/scripts/irm/irm_atte_config.yml b/scripts/irm/irm_atte_config.yml new file mode 100644 index 0000000..2a3ba63 --- /dev/null +++ b/scripts/irm/irm_atte_config.yml @@ -0,0 +1,68 @@ +# Simulation parameters for IRM ATTE Coverage + +simulation_parameters: + repetitions: 1000 + max_runtime: 19800 # 5.5 hours in seconds + random_seed: 42 + n_jobs: -2 + +dgp_parameters: + theta: [0.5] # Treatment effect + n_obs: [500] # Sample size + dim_x: [20] # Number of covariates + +# Define reusable learner configurations +learner_definitions: + lasso: &lasso + name: "LassoCV" + + logit: &logit + name: "Logistic" + + rfr: &rfr + name: "RF Regr." + params: + n_estimators: 200 + max_features: 20 + max_depth: 20 + min_samples_leaf: 2 + + rfc: &rfc + name: "RF Clas." + params: + n_estimators: 200 + max_features: 20 + max_depth: 20 + min_samples_leaf: 20 + + lgbmr: &lgbmr + name: "LGBM Regr." + params: + n_estimators: 500 + learning_rate: 0.01 + + lgbmc: &lgbmc + name: "LGBM Clas." + params: + n_estimators: 500 + learning_rate: 0.01 + +dml_parameters: + learners: + - ml_g: *lasso + ml_m: *logit + - ml_g: *rfr + ml_m: *rfc + - ml_g: *lasso + ml_m: *rfc + - ml_g: *rfr + ml_m: *logit + - ml_g: *lgbmr + ml_m: *lgbmc + - ml_g: *lgbmr + ml_m: *logit + - ml_g: *lasso + ml_m: *lgbmc + +confidence_parameters: + level: [0.95, 0.90] # Confidence levels diff --git a/scripts/irm/irm_atte_sensitivity.py b/scripts/irm/irm_atte_sensitivity.py index 10b5f0d..f28d9c9 100644 --- a/scripts/irm/irm_atte_sensitivity.py +++ b/scripts/irm/irm_atte_sensitivity.py @@ -1,198 +1,13 @@ -import numpy as np -import pandas as pd -from datetime import datetime -import time -import sys +from montecover.irm import IRMATTESensitivityCoverageSimulation -from sklearn.linear_model import LinearRegression, LogisticRegression -from lightgbm import LGBMRegressor, LGBMClassifier - -import doubleml as dml -from doubleml.datasets import make_confounded_irm_data - -# Number of repetitions -n_rep = 500 -max_runtime = 5.5 * 3600 # 5.5 hours in seconds - -# DGP pars -n_obs = 5000 -theta = 5.0 -trimming_threshold = 0.05 - -dgp_pars = { - "gamma_a": 0.151, - "beta_a": 0.580, - "theta": theta, - "var_epsilon_y": 1.0, - "trimming_threshold": trimming_threshold, - "linear": False, -} - -# test inputs -np.random.seed(42) -dgp_dict = make_confounded_irm_data(n_obs=int(1e6), **dgp_pars) - -oracle_dict = dgp_dict["oracle_values"] -rho = oracle_dict["rho_atte"] -cf_y = oracle_dict["cf_y"] -cf_d = oracle_dict["cf_d_atte"] - -print(f"Confounding factor for Y: {cf_y}") -print(f"Confounding factor for D: {cf_d}") -print(f"Rho: {rho}") - -# to get the best possible comparison between different learners (and settings) we first simulate all datasets -np.random.seed(42) -datasets = [] -for i in range(n_rep): - dgp_dict = make_confounded_irm_data(n_obs=n_obs, **dgp_pars) - datasets.append(dgp_dict) - -# set up hyperparameters -hyperparam_dict = { - "learner_g": [ - ("Linear Reg.", LinearRegression()), - ( - "LGBM", - LGBMRegressor( - n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1 - ), - ), - ], - "learner_m": [ - ("Logistic Regr.", LogisticRegression()), - ( - "LGBM", - LGBMClassifier( - n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1 - ), - ), - ], - "level": [0.95, 0.90], -} - -# set up the results dataframe -df_results_detailed = pd.DataFrame() - -# start simulation -np.random.seed(42) -start_time = time.time() - -for i_rep in range(n_rep): -
print(f"Repetition: {i_rep}/{n_rep}", end="\r") - - # Check the elapsed time - elapsed_time = time.time() - start_time - if elapsed_time > max_runtime: - print("Maximum runtime exceeded. Stopping the simulation.") - break - - # define the DoubleML data object - dgp_dict = datasets[i_rep] - - x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])] - df = pd.DataFrame( - np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])), - columns=x_cols + ["y", "d"], - ) - obj_dml_data = dml.DoubleMLData(df, "y", "d") - - for learner_g_idx, (learner_g_name, ml_g) in enumerate( - hyperparam_dict["learner_g"] - ): - for learner_m_idx, (learner_m_name, ml_m) in enumerate( - hyperparam_dict["learner_m"] - ): - # Set machine learning methods for g & m - dml_irm = dml.DoubleMLIRM( - obj_dml_data=obj_dml_data, - score="ATTE", - ml_g=ml_g, - ml_m=ml_m, - trimming_threshold=trimming_threshold, - ) - dml_irm.fit(n_jobs_cv=5) - - for level_idx, level in enumerate(hyperparam_dict["level"]): - estimate = dml_irm.coef[0] - confint = dml_irm.confint(level=level) - coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1]) - ci_length = confint.iloc[0, 1] - confint.iloc[0, 0] - - # test sensitivity parameters - dml_irm.sensitivity_analysis( - cf_y=cf_y, cf_d=cf_d, rho=rho, level=level, null_hypothesis=theta - ) - cover_lower = theta >= dml_irm.sensitivity_params["ci"]["lower"] - cover_upper = theta <= dml_irm.sensitivity_params["ci"]["upper"] - rv = dml_irm.sensitivity_params["rv"] - rva = dml_irm.sensitivity_params["rva"] - bias_lower = abs(theta - dml_irm.sensitivity_params["theta"]["lower"]) - bias_upper = abs(theta - dml_irm.sensitivity_params["theta"]["upper"]) - - df_results_detailed = pd.concat( - ( - df_results_detailed, - pd.DataFrame( - { - "Coverage": coverage.astype(int), - "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0], - "Bias": abs(estimate - theta), - "Coverage (Lower)": cover_lower.astype(int), - "Coverage (Upper)": cover_upper.astype(int), - "RV": rv, - "RVa": rva, - "Bias (Lower)": bias_lower, - "Bias (Upper)": bias_upper, - "Learner g": learner_g_name, - "Learner m": learner_m_name, - "level": level, - "repetition": i_rep, - }, - index=[0], - ), - ), - ignore_index=True, - ) - -df_results = ( - df_results_detailed.groupby(["Learner g", "Learner m", "level"]) - .agg( - { - "Coverage": "mean", - "CI Length": "mean", - "Bias": "mean", - "Coverage (Lower)": "mean", - "Coverage (Upper)": "mean", - "RV": "mean", - "RVa": "mean", - "Bias (Lower)": "mean", - "Bias (Upper)": "mean", - "repetition": "count", - } - ) - .reset_index() -) -print(df_results) -end_time = time.time() -total_runtime = end_time - start_time - -# save results -script_name = "irm_atte_sensitivity.py" -path = "results/irm/irm_atte_sensitivity" - -metadata = pd.DataFrame( - { - "DoubleML Version": [dml.__version__], - "Script": [script_name], - "Date": [datetime.now().strftime("%Y-%m-%d %H:%M:%S")], - "Total Runtime (seconds)": [total_runtime], - "Python Version": [ - f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}" - ], - } +# Create and run simulation with config file +sim = IRMATTESensitivityCoverageSimulation( + config_file="scripts/irm/irm_atte_sensitivity_config.yml", + log_level="INFO", + log_file="logs/irm/irm_atte_sensitivity_sim.log", ) -print(metadata) +sim.run_simulation() +sim.save_results(output_path="results/irm/", file_prefix="irm_atte_sensitivity") -df_results.to_csv(f"{path}.csv", index=False) -metadata.to_csv(f"{path}_metadata.csv", index=False) +# Save 
config file for reproducibility +sim.save_config("results/irm/irm_atte_sensitivity_config.yml") diff --git a/scripts/irm/irm_atte_sensitivity_config.yml b/scripts/irm/irm_atte_sensitivity_config.yml new file mode 100644 index 0000000..e6df9c4 --- /dev/null +++ b/scripts/irm/irm_atte_sensitivity_config.yml @@ -0,0 +1,54 @@ +# Simulation parameters for IRM ATTE Sensitivity Coverage + +simulation_parameters: + repetitions: 500 + max_runtime: 19800 # 5.5 hours in seconds + random_seed: 42 + n_jobs: -2 + +dgp_parameters: + theta: [5.0] # Treatment effect + n_obs: [5000] # Sample size + trimming_threshold: [0.05] # Trimming threshold + var_epsilon_y: [1.0] # Variance of outcome noise + linear: [False] + gamma_a: [0.151] + beta_a: [0.582] + +# Define reusable learner configurations +learner_definitions: + linear: &linear + name: "Linear" + + logit: &logit + name: "Logistic" + + lgbmr: &lgbmr + name: "LGBM Regr." + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + + lgbmc: &lgbmc + name: "LGBM Clas." + params: + n_estimators: 500 + learning_rate: 0.01 + min_child_samples: 10 + +dml_parameters: + learners: + - ml_g: *linear + ml_m: *logit + - ml_g: *lgbmr + ml_m: *lgbmc + - ml_g: *lgbmr + ml_m: *logit + - ml_g: *linear + ml_m: *lgbmc + + trimming_threshold: [0.05] + +confidence_parameters: + level: [0.95, 0.90] # Confidence levels diff --git a/scripts/irm/irm_atte_sensitivity_old.py b/scripts/irm/irm_atte_sensitivity_old.py new file mode 100644 index 0000000..10b5f0d --- /dev/null +++ b/scripts/irm/irm_atte_sensitivity_old.py @@ -0,0 +1,198 @@ +import numpy as np +import pandas as pd +from datetime import datetime +import time +import sys + +from sklearn.linear_model import LinearRegression, LogisticRegression +from lightgbm import LGBMRegressor, LGBMClassifier + +import doubleml as dml +from doubleml.datasets import make_confounded_irm_data + +# Number of repetitions +n_rep = 500 +max_runtime = 5.5 * 3600 # 5.5 hours in seconds + +# DGP pars +n_obs = 5000 +theta = 5.0 +trimming_threshold = 0.05 + +dgp_pars = { + "gamma_a": 0.151, + "beta_a": 0.580, + "theta": theta, + "var_epsilon_y": 1.0, + "trimming_threshold": trimming_threshold, + "linear": False, +} + +# test inputs +np.random.seed(42) +dgp_dict = make_confounded_irm_data(n_obs=int(1e6), **dgp_pars) + +oracle_dict = dgp_dict["oracle_values"] +rho = oracle_dict["rho_atte"] +cf_y = oracle_dict["cf_y"] +cf_d = oracle_dict["cf_d_atte"] + +print(f"Confounding factor for Y: {cf_y}") +print(f"Confounding factor for D: {cf_d}") +print(f"Rho: {rho}") + +# to get the best possible comparison between different learners (and settings) we first simulate all datasets +np.random.seed(42) +datasets = [] +for i in range(n_rep): + dgp_dict = make_confounded_irm_data(n_obs=n_obs, **dgp_pars) + datasets.append(dgp_dict) + +# set up hyperparameters +hyperparam_dict = { + "learner_g": [ + ("Linear Reg.", LinearRegression()), + ( + "LGBM", + LGBMRegressor( + n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1 + ), + ), + ], + "learner_m": [ + ("Logistic Regr.", LogisticRegression()), + ( + "LGBM", + LGBMClassifier( + n_estimators=500, learning_rate=0.01, min_child_samples=10, verbose=-1 + ), + ), + ], + "level": [0.95, 0.90], +} + +# set up the results dataframe +df_results_detailed = pd.DataFrame() + +# start simulation +np.random.seed(42) +start_time = time.time() + +for i_rep in range(n_rep): + print(f"Repetition: {i_rep}/{n_rep}", end="\r") + + # Check the elapsed time + elapsed_time = time.time() - start_time + if 
elapsed_time > max_runtime: + print("Maximum runtime exceeded. Stopping the simulation.") + break + + # define the DoubleML data object + dgp_dict = datasets[i_rep] + + x_cols = [f"X{i + 1}" for i in np.arange(dgp_dict["x"].shape[1])] + df = pd.DataFrame( + np.column_stack((dgp_dict["x"], dgp_dict["y"], dgp_dict["d"])), + columns=x_cols + ["y", "d"], + ) + obj_dml_data = dml.DoubleMLData(df, "y", "d") + + for learner_g_idx, (learner_g_name, ml_g) in enumerate( + hyperparam_dict["learner_g"] + ): + for learner_m_idx, (learner_m_name, ml_m) in enumerate( + hyperparam_dict["learner_m"] + ): + # Set machine learning methods for g & m + dml_irm = dml.DoubleMLIRM( + obj_dml_data=obj_dml_data, + score="ATTE", + ml_g=ml_g, + ml_m=ml_m, + trimming_threshold=trimming_threshold, + ) + dml_irm.fit(n_jobs_cv=5) + + for level_idx, level in enumerate(hyperparam_dict["level"]): + estimate = dml_irm.coef[0] + confint = dml_irm.confint(level=level) + coverage = (confint.iloc[0, 0] < theta) & (theta < confint.iloc[0, 1]) + ci_length = confint.iloc[0, 1] - confint.iloc[0, 0] + + # test sensitivity parameters + dml_irm.sensitivity_analysis( + cf_y=cf_y, cf_d=cf_d, rho=rho, level=level, null_hypothesis=theta + ) + cover_lower = theta >= dml_irm.sensitivity_params["ci"]["lower"] + cover_upper = theta <= dml_irm.sensitivity_params["ci"]["upper"] + rv = dml_irm.sensitivity_params["rv"] + rva = dml_irm.sensitivity_params["rva"] + bias_lower = abs(theta - dml_irm.sensitivity_params["theta"]["lower"]) + bias_upper = abs(theta - dml_irm.sensitivity_params["theta"]["upper"]) + + df_results_detailed = pd.concat( + ( + df_results_detailed, + pd.DataFrame( + { + "Coverage": coverage.astype(int), + "CI Length": confint.iloc[0, 1] - confint.iloc[0, 0], + "Bias": abs(estimate - theta), + "Coverage (Lower)": cover_lower.astype(int), + "Coverage (Upper)": cover_upper.astype(int), + "RV": rv, + "RVa": rva, + "Bias (Lower)": bias_lower, + "Bias (Upper)": bias_upper, + "Learner g": learner_g_name, + "Learner m": learner_m_name, + "level": level, + "repetition": i_rep, + }, + index=[0], + ), + ), + ignore_index=True, + ) + +df_results = ( + df_results_detailed.groupby(["Learner g", "Learner m", "level"]) + .agg( + { + "Coverage": "mean", + "CI Length": "mean", + "Bias": "mean", + "Coverage (Lower)": "mean", + "Coverage (Upper)": "mean", + "RV": "mean", + "RVa": "mean", + "Bias (Lower)": "mean", + "Bias (Upper)": "mean", + "repetition": "count", + } + ) + .reset_index() +) +print(df_results) +end_time = time.time() +total_runtime = end_time - start_time + +# save results +script_name = "irm_atte_sensitivity.py" +path = "results/irm/irm_atte_sensitivity" + +metadata = pd.DataFrame( + { + "DoubleML Version": [dml.__version__], + "Script": [script_name], + "Date": [datetime.now().strftime("%Y-%m-%d %H:%M:%S")], + "Total Runtime (seconds)": [total_runtime], + "Python Version": [ + f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}" + ], + } +) +print(metadata) + +df_results.to_csv(f"{path}.csv", index=False) +metadata.to_csv(f"{path}_metadata.csv", index=False) diff --git a/scripts/irm/irm_cate.py b/scripts/irm/irm_cate.py new file mode 100644 index 0000000..6d265b3 --- /dev/null +++ b/scripts/irm/irm_cate.py @@ -0,0 +1,13 @@ +from montecover.irm import IRMCATECoverageSimulation + +# Create and run simulation with config file +sim = IRMCATECoverageSimulation( + config_file="scripts/irm/irm_cate_config.yml", + log_level="INFO", + log_file="logs/irm/irm_cate_sim.log", +) +sim.run_simulation() 
diff --git a/scripts/irm/irm_cate.py b/scripts/irm/irm_cate.py
new file mode 100644
index 0000000..6d265b3
--- /dev/null
+++ b/scripts/irm/irm_cate.py
@@ -0,0 +1,13 @@
+from montecover.irm import IRMCATECoverageSimulation
+
+# Create and run simulation with config file
+sim = IRMCATECoverageSimulation(
+    config_file="scripts/irm/irm_cate_config.yml",
+    log_level="INFO",
+    log_file="logs/irm/irm_cate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="irm_cate")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/irm_cate_config.yml")
diff --git a/scripts/irm/irm_cate_config.yml b/scripts/irm/irm_cate_config.yml
new file mode 100644
index 0000000..c09f225
--- /dev/null
+++ b/scripts/irm/irm_cate_config.yml
@@ -0,0 +1,69 @@
+# Simulation parameters for IRM CATE Coverage
+
+simulation_parameters:
+  repetitions: 1000
+  max_runtime: 19800 # 5.5 hours in seconds
+  random_seed: 42
+  n_jobs: -2
+
+dgp_parameters:
+  n_obs: [500] # Sample size
+  p: [10] # Number of covariates
+  support_size: [5] # Number of non-zero coefficients
+  n_x: [1]
+
+# Define reusable learner configurations
+learner_definitions:
+  linear: &linear
+    name: "Linear"
+
+  logit: &logit
+    name: "Logistic"
+
+  rfr: &rfr
+    name: "RF Regr."
+    params:
+      n_estimators: 200
+      max_features: 20
+      max_depth: 5
+      min_samples_leaf: 2
+
+  rfc: &rfc
+    name: "RF Clas."
+    params:
+      n_estimators: 200
+      max_features: 20
+      max_depth: 5
+      min_samples_leaf: 2
+
+  lgbmr: &lgbmr
+    name: "LGBM Regr."
+    params:
+      n_estimators: 500
+      learning_rate: 0.01
+
+  lgbmc: &lgbmc
+    name: "LGBM Clas."
+    params:
+      n_estimators: 500
+      learning_rate: 0.01
+
+dml_parameters:
+  learners:
+    - ml_g: *linear
+      ml_m: *logit
+    - ml_g: *rfr
+      ml_m: *rfc
+    - ml_g: *linear
+      ml_m: *rfc
+    - ml_g: *rfr
+      ml_m: *logit
+    - ml_g: *lgbmr
+      ml_m: *lgbmc
+    - ml_g: *lgbmr
+      ml_m: *logit
+    - ml_g: *linear
+      ml_m: *lgbmc
+
+confidence_parameters:
+  level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/irm/irm_gate.py b/scripts/irm/irm_gate.py
new file mode 100644
index 0000000..97fc0f3
--- /dev/null
+++ b/scripts/irm/irm_gate.py
@@ -0,0 +1,13 @@
+from montecover.irm import IRMGATECoverageSimulation
+
+# Create and run simulation with config file
+sim = IRMGATECoverageSimulation(
+    config_file="scripts/irm/irm_gate_config.yml",
+    log_level="INFO",
+    log_file="logs/irm/irm_gate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="irm_gate")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/irm_gate_config.yml")
diff --git a/scripts/irm/irm_gate_config.yml b/scripts/irm/irm_gate_config.yml
new file mode 100644
index 0000000..3143ef1
--- /dev/null
+++ b/scripts/irm/irm_gate_config.yml
@@ -0,0 +1,69 @@
+# Simulation parameters for IRM GATE Coverage
+
+simulation_parameters:
+  repetitions: 1000
+  max_runtime: 19800 # 5.5 hours in seconds
+  random_seed: 42
+  n_jobs: -2
+
+dgp_parameters:
+  n_obs: [500] # Sample size
+  p: [10] # Number of covariates
+  support_size: [5] # Number of non-zero coefficients
+  n_x: [1]
+
+# Define reusable learner configurations
+learner_definitions:
+  linear: &linear
+    name: "Linear"
+
+  logit: &logit
+    name: "Logistic"
+
+  rfr: &rfr
+    name: "RF Regr."
+    params:
+      n_estimators: 200
+      max_features: 20
+      max_depth: 5
+      min_samples_leaf: 2
+
+  rfc: &rfc
+    name: "RF Clas."
+    params:
+      n_estimators: 200
+      max_features: 20
+      max_depth: 5
+      min_samples_leaf: 2
+
+  lgbmr: &lgbmr
+    name: "LGBM Regr."
+    params:
+      n_estimators: 500
+      learning_rate: 0.01
+
+  lgbmc: &lgbmc
+    name: "LGBM Clas."
+    params:
+      n_estimators: 500
+      learning_rate: 0.01
+
+dml_parameters:
+  learners:
+    - ml_g: *linear
+      ml_m: *logit
+    - ml_g: *rfr
+      ml_m: *rfc
+    - ml_g: *linear
+      ml_m: *rfc
+    - ml_g: *rfr
+      ml_m: *logit
+    - ml_g: *lgbmr
+      ml_m: *lgbmc
+    - ml_g: *lgbmr
+      ml_m: *logit
+    - ml_g: *linear
+      ml_m: *lgbmc
+
+confidence_parameters:
+  level: [0.95, 0.90] # Confidence levels
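A note on the `&`/`*` syntax used throughout these configs: YAML anchors (`&rfr`) and aliases (`*rfr`) let one learner definition be written once and reused across the `dml_parameters.learners` grid. Any standards-compliant loader resolves an alias to the anchored mapping, which the small check below demonstrates:

```python
import yaml  # PyYAML

cfg = yaml.safe_load(
    """
learner_definitions:
  rfr: &rfr
    name: "RF Regr."
    params:
      n_estimators: 200
      max_depth: 5

dml_parameters:
  learners:
    - ml_g: *rfr
"""
)

# The alias expands to the full anchored mapping:
assert cfg["dml_parameters"]["learners"][0]["ml_g"]["name"] == "RF Regr."
assert cfg["dml_parameters"]["learners"][0]["ml_g"]["params"]["max_depth"] == 5
```

One practical consequence: editing an anchored definition changes every learner combination that aliases it.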
diff --git a/scripts/irm/lpq.py b/scripts/irm/lpq.py
new file mode 100644
index 0000000..220aeab
--- /dev/null
+++ b/scripts/irm/lpq.py
@@ -0,0 +1,13 @@
+from montecover.irm import LPQCoverageSimulation
+
+# Create and run simulation with config file
+sim = LPQCoverageSimulation(
+    config_file="scripts/irm/lpq_config.yml",
+    log_level="INFO",
+    log_file="logs/irm/lpq_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="lpq")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/lpq_config.yml")
diff --git a/scripts/irm/lpq_config.yml b/scripts/irm/lpq_config.yml
new file mode 100644
index 0000000..ba717e4
--- /dev/null
+++ b/scripts/irm/lpq_config.yml
@@ -0,0 +1,46 @@
+# Simulation parameters for LPQ Coverage
+
+simulation_parameters:
+  repetitions: 200
+  max_runtime: 19800 # 5.5 hours in seconds
+  random_seed: 42
+  n_jobs: -2
+
+dgp_parameters:
+  n_obs: [5000] # Sample size
+  dim_x: [5] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+  logit: &logit
+    name: "Logistic"
+
+  lgbmc: &lgbmc
+    name: "LGBM Clas."
+    params:
+      n_estimators: 200 # Fewer trees, faster fitting
+      learning_rate: 0.05 # Balanced speed and stability
+      num_leaves: 15 # Modest complexity for smaller data
+      max_depth: 5 # Limit tree depth to avoid overfitting
+      min_child_samples: 10 # Conservative minimum samples per leaf
+      subsample: 0.9 # Slightly randomized rows
+      colsample_bytree: 0.9 # Slightly randomized features
+      reg_alpha: 0.0 # No L1 regularization (faster)
+      reg_lambda: 0.1 # Light L2 regularization
+      random_state: 42 # Reproducible
+
+dml_parameters:
+  tau_vec: [[0.3, 0.4, 0.5, 0.6, 0.7]] # Quantiles
+  trimming_threshold: [0.01]
+  learners:
+    - ml_g: *logit
+      ml_m: *logit
+    - ml_g: *lgbmc
+      ml_m: *lgbmc
+    - ml_g: *lgbmc
+      ml_m: *logit
+    - ml_g: *logit
+      ml_m: *lgbmc
+
+confidence_parameters:
+  level: [0.95, 0.90] # Confidence levels
diff --git a/scripts/irm/pq.py b/scripts/irm/pq.py
new file mode 100644
index 0000000..61237c0
--- /dev/null
+++ b/scripts/irm/pq.py
@@ -0,0 +1,13 @@
+from montecover.irm import PQCoverageSimulation
+
+# Create and run simulation with config file
+sim = PQCoverageSimulation(
+    config_file="scripts/irm/pq_config.yml",
+    log_level="INFO",
+    log_file="logs/irm/pq_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/irm/", file_prefix="pq")
+
+# Save config file for reproducibility
+sim.save_config("results/irm/pq_config.yml")
diff --git a/scripts/irm/pq_config.yml b/scripts/irm/pq_config.yml
new file mode 100644
index 0000000..fcc566f
--- /dev/null
+++ b/scripts/irm/pq_config.yml
@@ -0,0 +1,46 @@
+# Simulation parameters for PQ Coverage
+
+simulation_parameters:
+  repetitions: 200
+  max_runtime: 19800 # 5.5 hours in seconds
+  random_seed: 42
+  n_jobs: -2
+
+dgp_parameters:
+  n_obs: [5000] # Sample size
+  dim_x: [5] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+  logit: &logit
+    name: "Logistic"
+
+  lgbmc: &lgbmc
+    name: "LGBM Clas."
+    params:
+      n_estimators: 200 # Fewer trees, faster fitting
+      learning_rate: 0.05 # Balanced speed and stability
+      num_leaves: 15 # Modest complexity for smaller data
+      max_depth: 5 # Limit tree depth to avoid overfitting
+      min_child_samples: 10 # Conservative minimum samples per leaf
+      subsample: 0.9 # Slightly randomized rows
+      colsample_bytree: 0.9 # Slightly randomized features
+      reg_alpha: 0.0 # No L1 regularization (faster)
+      reg_lambda: 0.1 # Light L2 regularization
+      random_state: 42 # Reproducible
+
+dml_parameters:
+  tau_vec: [[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]] # Quantiles
+  trimming_threshold: [0.01]
+  learners:
+    - ml_g: *logit
+      ml_m: *logit
+    - ml_g: *lgbmc
+      ml_m: *lgbmc
+    - ml_g: *lgbmc
+      ml_m: *logit
+    - ml_g: *logit
+      ml_m: *lgbmc
+
+confidence_parameters:
+  level: [0.95, 0.90] # Confidence levels
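Unlike the ATE-style configs, the quantile configs carry a `tau_vec` grid, where each inner list is one set of quantiles evaluated in a single run. As a rough sketch of what one learner/quantile cell corresponds to, assuming DoubleML's `DoubleMLPQ` class and using `make_irm_data` as a stand-in DGP (the montecover simulation classes handle the actual data generation and aggregation):

```python
import numpy as np
from lightgbm import LGBMClassifier

import doubleml as dml
from doubleml.datasets import make_irm_data  # stand-in DGP for illustration

np.random.seed(42)
obj_dml_data = make_irm_data(n_obs=5000, dim_x=5, return_type="DoubleMLData")

# The lgbmc definition from pq_config.yml, instantiated directly
ml = LGBMClassifier(
    n_estimators=200, learning_rate=0.05, num_leaves=15, max_depth=5,
    min_child_samples=10, subsample=0.9, colsample_bytree=0.9,
    reg_alpha=0.0, reg_lambda=0.1, random_state=42, verbose=-1,
)

# One model per quantile in the tau_vec grid; each yields its own CI.
for tau in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]:
    dml_pq = dml.DoubleMLPQ(
        obj_dml_data, ml_g=ml, ml_m=ml, quantile=tau, trimming_threshold=0.01
    )
    dml_pq.fit()
    print(tau, dml_pq.confint(level=0.95))
```

The LPQ config is consumed analogously; DoubleML provides a corresponding `DoubleMLLPQ` class for local potential quantiles.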
diff --git a/scripts/ssm/ssm_mar_ate.py b/scripts/ssm/ssm_mar_ate.py
new file mode 100644
index 0000000..d22c31c
--- /dev/null
+++ b/scripts/ssm/ssm_mar_ate.py
@@ -0,0 +1,13 @@
+from montecover.ssm import SSMMarATECoverageSimulation
+
+# Create and run simulation with config file
+sim = SSMMarATECoverageSimulation(
+    config_file="scripts/ssm/ssm_mar_ate_config.yml",
+    log_level="INFO",
+    log_file="logs/ssm/ssm_mar_ate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/ssm/", file_prefix="ssm_mar_ate")
+
+# Save config file for reproducibility
+sim.save_config("results/ssm/ssm_mar_ate_config.yml")
diff --git a/scripts/ssm/ssm_mar_ate_config.yml b/scripts/ssm/ssm_mar_ate_config.yml
new file mode 100644
index 0000000..ca85751
--- /dev/null
+++ b/scripts/ssm/ssm_mar_ate_config.yml
@@ -0,0 +1,82 @@
+# Simulation parameters for SSM MAR ATE Coverage
+
+simulation_parameters:
+  repetitions: 1000
+  max_runtime: 19800 # 5.5 hours in seconds
+  random_seed: 42
+  n_jobs: -2
+
+dgp_parameters:
+  theta: [1.0] # Treatment effect
+  n_obs: [500] # Sample size
+  dim_x: [20] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+  lasso: &lasso
+    name: "LassoCV"
+
+  logit: &logit
+    name: "Logistic"
+
+  rfr: &rfr
+    name: "RF Regr."
+    params:
+      n_estimators: 200
+      max_features: 20
+      max_depth: 5
+      min_samples_leaf: 2
+
+  rfc: &rfc
+    name: "RF Clas."
+    params:
+      n_estimators: 200
+      max_features: 20
+      max_depth: 5
+      min_samples_leaf: 2
+
+  lgbmr: &lgbmr
+    name: "LGBM Regr."
+    params:
+      n_estimators: 500
+      learning_rate: 0.01
+
+  lgbmc: &lgbmc
+    name: "LGBM Clas."
+    params:
+      n_estimators: 500
+      learning_rate: 0.01
+
+dml_parameters:
+  learners:
+    - ml_g: *lasso
+      ml_m: *logit
+      ml_pi: *logit
+    - ml_g: *rfr
+      ml_m: *rfc
+      ml_pi: *rfc
+    - ml_g: *lasso
+      ml_m: *rfc
+      ml_pi: *rfc
+    - ml_g: *rfr
+      ml_m: *logit
+      ml_pi: *rfc
+    - ml_g: *rfr
+      ml_m: *rfc
+      ml_pi: *logit
+    - ml_g: *lgbmr
+      ml_m: *lgbmc
+      ml_pi: *lgbmc
+    - ml_g: *lasso
+      ml_m: *lgbmc
+      ml_pi: *lgbmc
+    - ml_g: *lgbmr
+      ml_m: *logit
+      ml_pi: *lgbmc
+    - ml_g: *lgbmr
+      ml_m: *lgbmc
+      ml_pi: *logit
+
+confidence_parameters:
+  level: [0.95, 0.90] # Confidence levels
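The SSM configs add a third nuisance learner, `ml_pi`, for the selection propensity. Below is a sketch of the estimator a single learner combination maps to, assuming `doubleml`'s `DoubleMLSSM` class and `make_ssm_data` generator; take the exact argument names as best-effort rather than a verified reproduction of what the montecover classes do internally:

```python
import numpy as np
from sklearn.linear_model import LassoCV, LogisticRegression

import doubleml as dml
from doubleml.datasets import make_ssm_data

np.random.seed(42)
# MAR selection: the outcome is observed at random conditional on (D, X)
obj_dml_data = make_ssm_data(n_obs=500, dim_x=20, theta=1.0, mar=True)

dml_ssm = dml.DoubleMLSSM(
    obj_dml_data,
    ml_g=LassoCV(),              # outcome regression
    ml_pi=LogisticRegression(),  # selection propensity
    ml_m=LogisticRegression(),   # treatment propensity
    score="missing-at-random",
)
dml_ssm.fit()
print(dml_ssm.confint(level=0.95))
```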
diff --git a/scripts/ssm/ssm_nonig_ate.py b/scripts/ssm/ssm_nonig_ate.py
new file mode 100644
index 0000000..2609915
--- /dev/null
+++ b/scripts/ssm/ssm_nonig_ate.py
@@ -0,0 +1,13 @@
+from montecover.ssm import SSMNonIgnorableATECoverageSimulation
+
+# Create and run simulation with config file
+sim = SSMNonIgnorableATECoverageSimulation(
+    config_file="scripts/ssm/ssm_nonig_ate_config.yml",
+    log_level="INFO",
+    log_file="logs/ssm/ssm_nonig_ate_sim.log",
+)
+sim.run_simulation()
+sim.save_results(output_path="results/ssm/", file_prefix="ssm_nonig_ate")
+
+# Save config file for reproducibility
+sim.save_config("results/ssm/ssm_nonig_ate_config.yml")
diff --git a/scripts/ssm/ssm_nonig_ate_config.yml b/scripts/ssm/ssm_nonig_ate_config.yml
new file mode 100644
index 0000000..ca85751
--- /dev/null
+++ b/scripts/ssm/ssm_nonig_ate_config.yml
@@ -0,0 +1,82 @@
+# Simulation parameters for SSM Nonignorable ATE Coverage
+
+simulation_parameters:
+  repetitions: 1000
+  max_runtime: 19800 # 5.5 hours in seconds
+  random_seed: 42
+  n_jobs: -2
+
+dgp_parameters:
+  theta: [1.0] # Treatment effect
+  n_obs: [500] # Sample size
+  dim_x: [20] # Number of covariates
+
+# Define reusable learner configurations
+learner_definitions:
+  lasso: &lasso
+    name: "LassoCV"
+
+  logit: &logit
+    name: "Logistic"
+
+  rfr: &rfr
+    name: "RF Regr."
+    params:
+      n_estimators: 200
+      max_features: 20
+      max_depth: 5
+      min_samples_leaf: 2
+
+  rfc: &rfc
+    name: "RF Clas."
+    params:
+      n_estimators: 200
+      max_features: 20
+      max_depth: 5
+      min_samples_leaf: 2
+
+  lgbmr: &lgbmr
+    name: "LGBM Regr."
+    params:
+      n_estimators: 500
+      learning_rate: 0.01
+
+  lgbmc: &lgbmc
+    name: "LGBM Clas."
+    params:
+      n_estimators: 500
+      learning_rate: 0.01
+
+dml_parameters:
+  learners:
+    - ml_g: *lasso
+      ml_m: *logit
+      ml_pi: *logit
+    - ml_g: *rfr
+      ml_m: *rfc
+      ml_pi: *rfc
+    - ml_g: *lasso
+      ml_m: *rfc
+      ml_pi: *rfc
+    - ml_g: *rfr
+      ml_m: *logit
+      ml_pi: *rfc
+    - ml_g: *rfr
+      ml_m: *rfc
+      ml_pi: *logit
+    - ml_g: *lgbmr
+      ml_m: *lgbmc
+      ml_pi: *lgbmc
+    - ml_g: *lasso
+      ml_m: *lgbmc
+      ml_pi: *lgbmc
+    - ml_g: *lgbmr
+      ml_m: *logit
+      ml_pi: *lgbmc
+    - ml_g: *lgbmr
+      ml_m: *lgbmc
+      ml_pi: *logit
+
+confidence_parameters:
+  level: [0.95, 0.90] # Confidence levels
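Both SSM configs share the same five-section layout as the IRM configs above, and the two runner scripts differ only in the simulation class they instantiate. A small, purely illustrative pre-flight check can catch a truncated or mis-merged config before a multi-hour run starts:

```python
import yaml

# The top-level section layout shared by the config files in this change
REQUIRED_SECTIONS = (
    "simulation_parameters",
    "dgp_parameters",
    "learner_definitions",
    "dml_parameters",
    "confidence_parameters",
)


def check_config(path: str) -> dict:
    """Load a simulation config and fail fast if a top-level section is missing."""
    with open(path) as f:
        cfg = yaml.safe_load(f)
    missing = [s for s in REQUIRED_SECTIONS if s not in cfg]
    if missing:
        raise ValueError(f"{path}: missing sections {missing}")
    return cfg


cfg = check_config("scripts/ssm/ssm_nonig_ate_config.yml")
```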