diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index baa6d625..cbc5c99e 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -24,11 +24,10 @@ body: label: Minimum reproducible code snippet description: | Please provide a short reproducible code snippet. Example: - ```python import numpy as np import doubleml as dml - from doubleml.datasets import make_plr_CCDDHNR2018 + from doubleml.plm.datasets import make_plr_CCDDHNR2018 from sklearn.ensemble import RandomForestRegressor from sklearn.base import clone np.random.seed(3141) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4809c62a..a614dd73 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,7 +15,7 @@ To submit a **bug report**, you can use our ```python import numpy as np import doubleml as dml -from doubleml.datasets import make_plr_CCDDHNR2018 +from doubleml.plm.datasets import make_plr_CCDDHNR2018 from sklearn.ensemble import RandomForestRegressor from sklearn.base import clone np.random.seed(3141) diff --git a/doubleml/__init__.py b/doubleml/__init__.py index 102ea995..6cf7de96 100644 --- a/doubleml/__init__.py +++ b/doubleml/__init__.py @@ -1,6 +1,6 @@ import importlib.metadata -from .data import DoubleMLClusterData, DoubleMLData +from .data import DoubleMLClusterData, DoubleMLData, DoubleMLDIDData, DoubleMLPanelData, DoubleMLRDDData, DoubleMLSSMData from .did.did import DoubleMLDID from .did.did_cs import DoubleMLDIDCS from .double_ml_framework import DoubleMLFramework, concat @@ -29,6 +29,10 @@ "DoubleMLIIVM", "DoubleMLData", "DoubleMLClusterData", + "DoubleMLDIDData", + "DoubleMLPanelData", + "DoubleMLRDDData", + "DoubleMLSSMData", "DoubleMLDID", "DoubleMLDIDCS", "DoubleMLPQ", diff --git a/doubleml/data/__init__.py b/doubleml/data/__init__.py index d8a920c6..8343c228 100644 --- a/doubleml/data/__init__.py +++ b/doubleml/data/__init__.py @@ -2,12 +2,78 @@ The :mod:`doubleml.data` module implements data 
classes for double machine learning. """ +import warnings + from .base_data import DoubleMLData -from .cluster_data import DoubleMLClusterData +from .did_data import DoubleMLDIDData from .panel_data import DoubleMLPanelData +from .rdd_data import DoubleMLRDDData +from .ssm_data import DoubleMLSSMData + + +class DoubleMLClusterData(DoubleMLData): + """ + Backwards compatibility wrapper for DoubleMLData with cluster_cols. + This class is deprecated and will be removed in a future version. + Use DoubleMLData with cluster_cols instead. + """ + + def __init__( + self, + data, + y_col, + d_cols, + cluster_cols, + x_cols=None, + z_cols=None, + t_col=None, + s_col=None, + use_other_treat_as_covariate=True, + force_all_x_finite=True, + ): + warnings.warn( + "DoubleMLClusterData is deprecated and will be removed with version 0.12.0. " + "Use DoubleMLData with cluster_cols instead.", + FutureWarning, + stacklevel=2, + ) + super().__init__( + data=data, + y_col=y_col, + d_cols=d_cols, + x_cols=x_cols, + z_cols=z_cols, + cluster_cols=cluster_cols, + use_other_treat_as_covariate=use_other_treat_as_covariate, + force_all_x_finite=force_all_x_finite, + force_all_d_finite=True, + ) + + @classmethod + def from_arrays( + cls, x, y, d, cluster_vars, z=None, t=None, s=None, use_other_treat_as_covariate=True, force_all_x_finite=True + ): + """ + Initialize :class:`DoubleMLClusterData` from :class:`numpy.ndarray`'s. + This method is deprecated and will be removed with version 0.12.0, + use DoubleMLData.from_arrays with cluster_vars instead. + """ + warnings.warn( + "DoubleMLClusterData is deprecated and will be removed with version 0.12.0. 
" + "Use DoubleMLData.from_arrays with cluster_vars instead.", + FutureWarning, + stacklevel=2, + ) + return DoubleMLData.from_arrays( + x=x, + y=y, + d=d, + z=z, + cluster_vars=cluster_vars, + use_other_treat_as_covariate=use_other_treat_as_covariate, + force_all_x_finite=force_all_x_finite, + force_all_d_finite=True, + ) + -__all__ = [ - "DoubleMLData", - "DoubleMLClusterData", - "DoubleMLPanelData", -] +__all__ = ["DoubleMLData", "DoubleMLClusterData", "DoubleMLDIDData", "DoubleMLPanelData", "DoubleMLRDDData", "DoubleMLSSMData"] diff --git a/doubleml/data/base_data.py b/doubleml/data/base_data.py index 318508e9..a7ae30f6 100644 --- a/doubleml/data/base_data.py +++ b/doubleml/data/base_data.py @@ -98,19 +98,14 @@ class DoubleMLData(DoubleMLBaseData): x_cols : None, str or list The covariates. If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor - treatment variables ``d_cols``, nor instrumental variables ``z_cols`` are used as covariates. + treatment variables ``d_cols``, nor instrumental variables ``z_cols``, nor cluster variables ``cluster_cols`` + are used as covariates. Default is ``None``. z_cols : None, str or list The instrumental variable(s). - Default is ``None``. - - t_col : None or str - The time variable (only relevant/used for DiD Estimators). - Default is ``None``. - - s_col : None or str - The score or selection variable (only relevant/used for RDD or SSM Estimatiors). + Default is ``None``. cluster_cols : None, str or list + The cluster variable(s). Default is ``None``. 
use_other_treat_as_covariate : bool @@ -137,7 +132,7 @@ class DoubleMLData(DoubleMLBaseData): Examples -------- >>> from doubleml import DoubleMLData - >>> from doubleml.datasets import make_plr_CCDDHNR2018 + >>> from doubleml.plm.datasets import make_plr_CCDDHNR2018 >>> # initialization from pandas.DataFrame >>> df = make_plr_CCDDHNR2018(return_type='DataFrame') >>> obj_dml_data_from_df = DoubleMLData(df, 'y', 'd') @@ -153,8 +148,7 @@ def __init__( d_cols, x_cols=None, z_cols=None, - t_col=None, - s_col=None, + cluster_cols=None, use_other_treat_as_covariate=True, force_all_x_finite=True, force_all_d_finite=True, @@ -164,8 +158,7 @@ def __init__( self.y_col = y_col self.d_cols = d_cols self.z_cols = z_cols - self.t_col = t_col - self.s_col = s_col + self.cluster_cols = cluster_cols self.x_cols = x_cols self._check_disjoint_sets() self.use_other_treat_as_covariate = use_other_treat_as_covariate @@ -173,7 +166,9 @@ def __init__( self.force_all_d_finite = force_all_d_finite self._binary_treats = self._check_binary_treats() self._binary_outcome = self._check_binary_outcome() - self._set_y_z_t_s() + self._set_y_z() + if self.cluster_cols is not None: + self._set_cluster_vars() # by default, we initialize to the first treatment variable self.set_x_d(self.d_cols[0]) @@ -198,10 +193,12 @@ def _data_summary_str(self): f"Covariates: {self.x_cols}\n" f"Instrument variable(s): {self.z_cols}\n" ) - if self.t_col is not None: - data_summary += f"Time variable: {self.t_col}\n" - if self.s_col is not None: - data_summary += f"Score/Selection variable: {self.s_col}\n" + + if self.cluster_cols is not None: + data_summary += f"Cluster variable(s): {self.cluster_cols}\n" + + if hasattr(self, "is_cluster_data") and self.is_cluster_data: + data_summary += f"Is cluster data: {self.is_cluster_data}\n" data_summary += f"No. 
Observations: {self.n_obs}\n" return data_summary @@ -212,8 +209,7 @@ def from_arrays( y, d, z=None, - t=None, - s=None, + cluster_vars=None, use_other_treat_as_covariate=True, force_all_x_finite=True, force_all_d_finite=True, @@ -236,12 +232,8 @@ def from_arrays( Array of instrumental variables. Default is ``None``. - t : :class:`numpy.ndarray` - Array of the time variable (only relevant/used for DiD models). - Default is ``None``. - - s : :class:`numpy.ndarray` - Array of the score or selection variable (only relevant/used for RDD and SSM models). + cluster_vars : None or :class:`numpy.ndarray` + Array of cluster variables. Default is ``None``. use_other_treat_as_covariate : bool @@ -268,7 +260,7 @@ def from_arrays( Examples -------- >>> from doubleml import DoubleMLData - >>> from doubleml.datasets import make_plr_CCDDHNR2018 + >>> from doubleml.plm.datasets import make_plr_CCDDHNR2018 >>> (x, y, d) = make_plr_CCDDHNR2018(return_type='array') >>> obj_dml_data_from_array = DoubleMLData.from_arrays(x, y, d) """ @@ -302,6 +294,7 @@ def from_arrays( d = _assure_2d_array(d) y_col = "y" + if z is None: check_consistent_length(x, y, d) z_cols = None @@ -314,39 +307,30 @@ def from_arrays( else: z_cols = [f"z{i + 1}" for i in np.arange(z.shape[1])] - if t is None: - t_col = None + if cluster_vars is None: + cluster_cols = None else: - t = column_or_1d(t, warn=True) - check_consistent_length(x, y, d, t) - t_col = "t" - - if s is None: - s_col = None - else: - s = column_or_1d(s, warn=True) - check_consistent_length(x, y, d, s) - s_col = "s" + cluster_vars = check_array(cluster_vars, ensure_2d=False, allow_nd=False) + cluster_vars = _assure_2d_array(cluster_vars) + check_consistent_length(x, y, d, cluster_vars) + if cluster_vars.shape[1] == 1: + cluster_cols = ["cluster_var"] + else: + cluster_cols = [f"cluster_var{i + 1}" for i in np.arange(cluster_vars.shape[1])] if d.shape[1] == 1: d_cols = ["d"] else: d_cols = [f"d{i + 1}" for i in np.arange(d.shape[1])] - x_cols = 
[f"X{i + 1}" for i in np.arange(x.shape[1])] - - # baseline version with features, outcome and treatments + x_cols = [f"X{i + 1}" for i in np.arange(x.shape[1])] # baseline version with features, outcome and treatments data = pd.DataFrame(np.column_stack((x, y, d)), columns=x_cols + [y_col] + d_cols) - if z is not None: df_z = pd.DataFrame(z, columns=z_cols) data = pd.concat([data, df_z], axis=1) - - if t is not None: - data[t_col] = t - - if s is not None: - data[s_col] = s + if cluster_vars is not None: + df_cluster = pd.DataFrame(cluster_vars, columns=cluster_cols) + data = pd.concat([data, df_cluster], axis=1) return cls( data, @@ -354,8 +338,7 @@ def from_arrays( d_cols, x_cols, z_cols, - t_col, - s_col, + cluster_cols, use_other_treat_as_covariate, force_all_x_finite, force_all_d_finite, @@ -399,24 +382,37 @@ def z(self): return None @property - def t(self): + def cluster_cols(self): """ - Array of time variable. + The cluster variable(s). """ - if self.t_col is not None: - return self._t.values - else: - return None + return self._cluster_cols - @property - def s(self): - """ - Array of score or selection variable. - """ - if self.s_col is not None: - return self._s.values + @cluster_cols.setter + def cluster_cols(self, value): + reset_value = hasattr(self, "_cluster_cols") + if value is not None: + if isinstance(value, str): + value = [value] + if not isinstance(value, list): + raise TypeError( + "The cluster variable(s) cluster_cols must be of str or list type (or None). " + f"{str(value)} of type {str(type(value))} was passed." + ) + if not len(set(value)) == len(value): + raise ValueError("Invalid cluster variable(s) cluster_cols: Contains duplicate values.") + if not set(value).issubset(set(self.all_variables)): + raise ValueError("Invalid cluster variable(s) cluster_cols. 
At least one cluster variable is no data column.") + self._cluster_cols = value else: - return None + self._cluster_cols = None + + self._is_cluster_data = self._cluster_cols is not None + + if reset_value: + self._check_disjoint_sets() + if self.cluster_cols is not None: + self._set_cluster_vars() @property def n_treat(self): @@ -540,7 +536,7 @@ def y_col(self, value): self._y_col = value if reset_value: self._check_disjoint_sets() - self._set_y_z_t_s() + self._set_y_z() @property def z_cols(self): @@ -569,59 +565,30 @@ def z_cols(self, value): self._z_cols = value else: self._z_cols = None + if reset_value: self._check_disjoint_sets() - self._set_y_z_t_s() + self._set_y_z() @property - def t_col(self): + def n_cluster_vars(self): """ - The time variable. + The number of cluster variables. """ - return self._t_col - - @t_col.setter - def t_col(self, value): - reset_value = hasattr(self, "_t_col") - if value is not None: - if not isinstance(value, str): - raise TypeError( - "The time variable t_col must be of str type (or None). " - f"{str(value)} of type {str(type(value))} was passed." - ) - if value not in self.all_variables: - raise ValueError(f"Invalid time variable t_col. {value} is no data column.") - self._t_col = value + if self.cluster_cols is not None: + return len(self.cluster_cols) else: - self._t_col = None - if reset_value: - self._check_disjoint_sets() - self._set_y_z_t_s() + return 0 @property - def s_col(self): + def cluster_vars(self): """ - The score or selection variable. + Array of cluster variable(s). """ - return self._s_col - - @s_col.setter - def s_col(self, value): - reset_value = hasattr(self, "_s_col") - if value is not None: - if not isinstance(value, str): - raise TypeError( - "The score or selection variable s_col must be of str type (or None). " - f"{str(value)} of type {str(type(value))} was passed." - ) - if value not in self.all_variables: - raise ValueError(f"Invalid score or selection variable s_col. 
{value} is no data column.") - self._s_col = value + if self.cluster_cols is not None: + return self._cluster_vars.values else: - self._s_col = None - if reset_value: - self._check_disjoint_sets() - self._set_y_z_t_s() + return None @property def use_other_treat_as_covariate(self): @@ -686,7 +653,7 @@ def force_all_d_finite(self, value): # by default, we initialize to the first treatment variable self.set_x_d(self.d_cols[0]) - def _set_y_z_t_s(self): + def _set_y_z(self): def _set_attr(col): if col is None: return None @@ -695,8 +662,12 @@ def _set_attr(col): self._y = _set_attr(self.y_col) self._z = _set_attr(self.z_cols) - self._t = _set_attr(self.t_col) - self._s = _set_attr(self.s_col) + + def _set_cluster_vars(self): + """Set cluster variables.""" + if self.cluster_cols is not None: + assert_all_finite(self.data.loc[:, self.cluster_cols]) + self._cluster_vars = self.data.loc[:, self.cluster_cols] def set_x_d(self, treatment_var): """ @@ -730,10 +701,8 @@ def set_x_d(self, treatment_var): def _get_optional_col_sets(self): # this function can be extended in inherited subclasses z_cols_set = set(self.z_cols or []) - t_col_set = {self.t_col} if self.t_col else set() - s_col_set = {self.s_col} if self.s_col else set() - - return [z_cols_set, t_col_set, s_col_set] + cluster_cols_set = set(self.cluster_cols or []) + return [cluster_cols_set, z_cols_set] def _check_binary_treats(self): is_binary = pd.Series(dtype=bool, index=self.d_cols) @@ -763,7 +732,9 @@ def _check_disjoint(set1, set2, name1, arg1, name2, arg2): def _check_disjoint_sets(self): # this function can be extended in inherited subclasses self._check_disjoint_sets_y_d_x() - self._check_disjoint_sets_z_t_s() + self._check_disjoint_sets_z() + if self.cluster_cols is not None: + self._check_disjoint_sets_cluster_cols() def _check_disjoint_sets_y_d_x(self): y_col_set = {self.y_col} @@ -784,14 +755,12 @@ def _check_disjoint_sets_y_d_x(self): "(``x_cols``). 
Consider using parameter ``use_other_treat_as_covariate``." ) - def _check_disjoint_sets_z_t_s(self): + def _check_disjoint_sets_z(self): y_col_set = {self.y_col} x_cols_set = set(self.x_cols) d_cols_set = set(self.d_cols) z_cols_set = set(self.z_cols or []) - t_col_set = {self.t_col} if self.t_col else set() - s_col_set = {self.s_col} if self.s_col else set() instrument_checks_args = [ (y_col_set, "outcome variable", "``y_col``"), @@ -803,12 +772,32 @@ def _check_disjoint_sets_z_t_s(self): set1=set1, name1=name, arg1=argument, set2=z_cols_set, name2="instrumental variable", arg2="``z_cols``" ) - time_check_args = instrument_checks_args + [(z_cols_set, "instrumental variable", "``z_cols``")] - for set1, name, argument in time_check_args: - self._check_disjoint(set1=set1, name1=name, arg1=argument, set2=t_col_set, name2="time variable", arg2="``t_col``") - - score_check_args = time_check_args + [(t_col_set, "time variable", "``t_col``")] - for set1, name, argument in score_check_args: + def _check_disjoint_sets_cluster_cols(self): + """Check that cluster columns are disjoint from other variable sets.""" + cluster_cols_set = set(self.cluster_cols) + y_col_set = {self.y_col} + x_cols_set = set(self.x_cols) + d_cols_set = set(self.d_cols) + z_cols_set = set(self.z_cols or []) + checks = [ + (y_col_set, "outcome variable", "``y_col``"), + (d_cols_set, "treatment variable", "``d_cols``"), + (x_cols_set, "covariate", "``x_cols``"), + (z_cols_set, "instrumental variable", "``z_cols``"), + ] + for set1, name, arg in checks: self._check_disjoint( - set1=set1, name1=name, arg1=argument, set2=s_col_set, name2="score or selection variable", arg2="``s_col``" + set1=set1, + name1=name, + arg1=arg, + set2=cluster_cols_set, + name2="cluster variable(s)", + arg2="``cluster_cols``", ) + + @property + def is_cluster_data(self): + """ + Flag indicating whether this data object is being used for cluster data. 
+ """ + return self._is_cluster_data diff --git a/doubleml/data/cluster_data.py b/doubleml/data/cluster_data.py deleted file mode 100644 index 658ab0cc..00000000 --- a/doubleml/data/cluster_data.py +++ /dev/null @@ -1,289 +0,0 @@ -import io - -import numpy as np -import pandas as pd -from sklearn.utils import assert_all_finite -from sklearn.utils.validation import check_array - -from doubleml.data.base_data import DoubleMLBaseData, DoubleMLData -from doubleml.utils._estimation import _assure_2d_array - - -class DoubleMLClusterData(DoubleMLData): - """Double machine learning data-backend for data with cluster variables. - - :class:`DoubleMLClusterData` objects can be initialized from - :class:`pandas.DataFrame`'s as well as :class:`numpy.ndarray`'s. - - Parameters - ---------- - data : :class:`pandas.DataFrame` - The data. - - y_col : str - The outcome variable. - - d_cols : str or list - The treatment variable(s). - - cluster_cols : str or list - The cluster variable(s). - - x_cols : None, str or list - The covariates. - If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor - treatment variables ``d_cols``, nor instrumental variables ``z_cols`` are used as covariates. - Default is ``None``. - - z_cols : None, str or list - The instrumental variable(s). - Default is ``None``. - - t_col : None or str - The time variable (only relevant/used for DiD Estimators). - Default is ``None``. - - s_col : None or str - The score or selection variable (only relevant/used for RDD and SSM Estimatiors). - Default is ``None``. - - use_other_treat_as_covariate : bool - Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates. - Default is ``True``. - - force_all_x_finite : bool or str - Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``. 
- Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are - allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed). - Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used - for the nuisance functions are capable to provide valid predictions with missings and / or infinite values - in the covariates ``x``. - Default is ``True``. - - Examples - -------- - >>> from doubleml import DoubleMLClusterData - >>> from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021 - >>> # initialization from pandas.DataFrame - >>> df = make_pliv_multiway_cluster_CKMS2021(return_type='DataFrame') - >>> obj_dml_data_from_df = DoubleMLClusterData(df, 'Y', 'D', ['cluster_var_i', 'cluster_var_j'], z_cols='Z') - >>> # initialization from np.ndarray - >>> (x, y, d, cluster_vars, z) = make_pliv_multiway_cluster_CKMS2021(return_type='array') - >>> obj_dml_data_from_array = DoubleMLClusterData.from_arrays(x, y, d, cluster_vars, z) - """ - - def __init__( - self, - data, - y_col, - d_cols, - cluster_cols, - x_cols=None, - z_cols=None, - t_col=None, - s_col=None, - use_other_treat_as_covariate=True, - force_all_x_finite=True, - ): - DoubleMLBaseData.__init__(self, data) - - # we need to set cluster_cols (needs _data) before call to the super __init__ because of the x_cols setter - self.cluster_cols = cluster_cols - self._set_cluster_vars() - DoubleMLData.__init__( - self, data, y_col, d_cols, x_cols, z_cols, t_col, s_col, use_other_treat_as_covariate, force_all_x_finite - ) - self._check_disjoint_sets_cluster_cols() - - def __str__(self): - data_summary = self._data_summary_str() - buf = io.StringIO() - self.data.info(verbose=False, buf=buf) - df_info = buf.getvalue() - res = ( - "================== DoubleMLClusterData Object ==================\n" - + "\n------------------ Data summary ------------------\n" - + data_summary - + 
"\n------------------ DataFrame info ------------------\n" - + df_info - ) - return res - - def _data_summary_str(self): - data_summary = ( - f"Outcome variable: {self.y_col}\n" - f"Treatment variable(s): {self.d_cols}\n" - f"Cluster variable(s): {self.cluster_cols}\n" - f"Covariates: {self.x_cols}\n" - f"Instrument variable(s): {self.z_cols}\n" - ) - if self.t_col is not None: - data_summary += f"Time variable: {self.t_col}\n" - if self.s_col is not None: - data_summary += f"Score/Selection variable: {self.s_col}\n" - - data_summary += f"No. Observations: {self.n_obs}\n" - return data_summary - - @classmethod - def from_arrays( - cls, x, y, d, cluster_vars, z=None, t=None, s=None, use_other_treat_as_covariate=True, force_all_x_finite=True - ): - """ - Initialize :class:`DoubleMLClusterData` from :class:`numpy.ndarray`'s. - - Parameters - ---------- - x : :class:`numpy.ndarray` - Array of covariates. - - y : :class:`numpy.ndarray` - Array of the outcome variable. - - d : :class:`numpy.ndarray` - Array of treatment variables. - - cluster_vars : :class:`numpy.ndarray` - Array of cluster variables. - - z : None or :class:`numpy.ndarray` - Array of instrumental variables. - Default is ``None``. - - t : :class:`numpy.ndarray` - Array of the time variable (only relevant/used for DiD models). - Default is ``None``. - - s : :class:`numpy.ndarray` - Array of the score or selection variable (only relevant/used for RDD or SSM models). - Default is ``None``. - - use_other_treat_as_covariate : bool - Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates. - Default is ``True``. - - force_all_x_finite : bool or str - Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``. 
- Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are - allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed). - Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used - for the nuisance functions are capable to provide valid predictions with missings and / or infinite values - in the covariates ``x``. - Default is ``True``. - - Examples - -------- - >>> from doubleml import DoubleMLClusterData - >>> from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021 - >>> (x, y, d, cluster_vars, z) = make_pliv_multiway_cluster_CKMS2021(return_type='array') - >>> obj_dml_data_from_array = DoubleMLClusterData.from_arrays(x, y, d, cluster_vars, z) - """ - dml_data = DoubleMLData.from_arrays(x, y, d, z, t, s, use_other_treat_as_covariate, force_all_x_finite) - cluster_vars = check_array(cluster_vars, ensure_2d=False, allow_nd=False) - cluster_vars = _assure_2d_array(cluster_vars) - if cluster_vars.shape[1] == 1: - cluster_cols = ["cluster_var"] - else: - cluster_cols = [f"cluster_var{i + 1}" for i in np.arange(cluster_vars.shape[1])] - - data = pd.concat((pd.DataFrame(cluster_vars, columns=cluster_cols), dml_data.data), axis=1) - - return cls( - data, - dml_data.y_col, - dml_data.d_cols, - cluster_cols, - dml_data.x_cols, - dml_data.z_cols, - dml_data.t_col, - dml_data.s_col, - dml_data.use_other_treat_as_covariate, - dml_data.force_all_x_finite, - ) - - @property - def cluster_cols(self): - """ - The cluster variable(s). - """ - return self._cluster_cols - - @cluster_cols.setter - def cluster_cols(self, value): - reset_value = hasattr(self, "_cluster_cols") - if isinstance(value, str): - value = [value] - if not isinstance(value, list): - raise TypeError( - "The cluster variable(s) cluster_cols must be of str or list type. " - f"{str(value)} of type {str(type(value))} was passed." 
- ) - if not len(set(value)) == len(value): - raise ValueError("Invalid cluster variable(s) cluster_cols: Contains duplicate values.") - if not set(value).issubset(set(self.all_variables)): - raise ValueError("Invalid cluster variable(s) cluster_cols. At least one cluster variable is no data column.") - self._cluster_cols = value - if reset_value: - self._check_disjoint_sets() - self._set_cluster_vars() - - @property - def n_cluster_vars(self): - """ - The number of cluster variables. - """ - return len(self.cluster_cols) - - @property - def cluster_vars(self): - """ - Array of cluster variable(s). - """ - return self._cluster_vars.values - - def _get_optional_col_sets(self): - base_optional_col_sets = super()._get_optional_col_sets() - cluster_cols_set = set(self.cluster_cols) - return [cluster_cols_set] + base_optional_col_sets - - def _check_disjoint_sets(self): - # apply the standard checks from the DoubleMLData class - super(DoubleMLClusterData, self)._check_disjoint_sets() - self._check_disjoint_sets_cluster_cols() - - def _check_disjoint_sets_cluster_cols(self): - # apply the standard checks from the DoubleMLData class - super(DoubleMLClusterData, self)._check_disjoint_sets() - - # special checks for the additional cluster variables - cluster_cols_set = set(self.cluster_cols) - y_col_set = {self.y_col} - x_cols_set = set(self.x_cols) - d_cols_set = set(self.d_cols) - - z_cols_set = set(self.z_cols or []) - t_col_set = {self.t_col} if self.t_col else set() - s_col_set = {self.s_col} if self.s_col else set() - - # TODO: X can not be used as cluster variable - cluster_checks_args = [ - (y_col_set, "outcome variable", "``y_col``"), - (d_cols_set, "treatment variable", "``d_cols``"), - (x_cols_set, "covariate", "``x_cols``"), - (z_cols_set, "instrumental variable", "``z_cols``"), - (t_col_set, "time variable", "``t_col``"), - (s_col_set, "score or selection variable", "``s_col``"), - ] - for set1, name, argument in cluster_checks_args: - self._check_disjoint( - 
set1=set1, - name1=name, - arg1=argument, - set2=cluster_cols_set, - name2="cluster variable(s)", - arg2="``cluster_cols``", - ) - - def _set_cluster_vars(self): - assert_all_finite(self.data.loc[:, self.cluster_cols]) - self._cluster_vars = self.data.loc[:, self.cluster_cols] diff --git a/doubleml/data/did_data.py b/doubleml/data/did_data.py new file mode 100644 index 00000000..1554e4bc --- /dev/null +++ b/doubleml/data/did_data.py @@ -0,0 +1,320 @@ +import io + +import pandas as pd +from sklearn.utils import assert_all_finite +from sklearn.utils.validation import check_consistent_length, column_or_1d + +from doubleml.data.base_data import DoubleMLData + + +class DoubleMLDIDData(DoubleMLData): + """Double machine learning data-backend for Difference-in-Differences models. + + :class:`DoubleMLDIDData` objects can be initialized from + :class:`pandas.DataFrame`'s as well as :class:`numpy.ndarray`'s. + + Parameters + ---------- + data : :class:`pandas.DataFrame` + The data. + + y_col : str + The outcome variable. + + d_cols : str or list + The treatment variable(s). + + t_col : str + The time variable for DiD models. + + x_cols : None, str or list + The covariates. + If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor + treatment variables ``d_cols``, nor instrumental variables ``z_cols``, nor time variable ``t_col`` + are used as covariates. + Default is ``None``. + + z_cols : None, str or list + The instrumental variable(s). + Default is ``None``. + + cluster_cols : None, str or list + The cluster variable(s). + Default is ``None``. + + use_other_treat_as_covariate : bool + Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates. + Default is ``True``. + + force_all_x_finite : bool or str + Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``. 
+ Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are + allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed). + Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used + for the nuisance functions are capable to provide valid predictions with missings and / or infinite values + in the covariates ``x``. + Default is ``True``. + + force_all_d_finite : bool + Indicates whether to raise an error on infinite values and / or missings in the treatment variables ``d``. + Default is ``True``. Examples + -------- + >>> from doubleml import DoubleMLDIDData + >>> from doubleml.did.datasets import make_did_SZ2020 + >>> # initialization from pandas.DataFrame + >>> df = make_did_SZ2020(return_type='DataFrame') + >>> obj_dml_data_from_df = DoubleMLDIDData(df, 'y', 'd', 't') + >>> # initialization from np.ndarray + >>> (x, y, d, t) = make_did_SZ2020(return_type='array') + >>> obj_dml_data_from_array = DoubleMLDIDData.from_arrays(x, y, d, t=t) + """ + + def __init__( + self, + data, + y_col, + d_cols, + x_cols=None, + z_cols=None, + t_col=None, + cluster_cols=None, + use_other_treat_as_covariate=True, + force_all_x_finite=True, + force_all_d_finite=True, + ): # Initialize _t_col to None first to avoid AttributeError during parent init + self._t_col = None + + # Store whether x_cols was originally None to reset it later + x_cols_was_none = x_cols is None + + # Call parent constructor first to set _data + super().__init__( + data=data, + y_col=y_col, + d_cols=d_cols, + x_cols=x_cols, + z_cols=z_cols, + cluster_cols=cluster_cols, + use_other_treat_as_covariate=use_other_treat_as_covariate, + force_all_x_finite=force_all_x_finite, + force_all_d_finite=force_all_d_finite, + ) + + # Set time column directly to avoid triggering checks during init + if t_col is not None: + if not isinstance(t_col, str): + raise TypeError( + "The time 
variable t_col must be of str type (or None). " + f"{str(t_col)} of type {str(type(t_col))} was passed." + ) + if t_col not in self.all_variables: + raise ValueError(f"Invalid time variable t_col. {t_col} is no data column.") + self._t_col = t_col + + # If x_cols was originally None, reset it to exclude the time column + if x_cols_was_none and t_col is not None: + self.x_cols = None + + # Now run the checks and set variables + if t_col is not None: + self._check_disjoint_sets() + self._set_y_z_t() + + # Set time variable array after data is loaded + self._set_time_var() + + @classmethod + def from_arrays( + cls, + x, + y, + d, + z=None, + t=None, + cluster_vars=None, + use_other_treat_as_covariate=True, + force_all_x_finite=True, + force_all_d_finite=True, + ): + """ + Initialize :class:`DoubleMLDIDData` object from :class:`numpy.ndarray`'s. + + Parameters + ---------- + x : :class:`numpy.ndarray` + Array of covariates. + + y : :class:`numpy.ndarray` + Array of the outcome variable. + + d : :class:`numpy.ndarray` + Array of treatment variables. + + t : :class:`numpy.ndarray` + Array of the time variable for DiD models. + + z : None or :class:`numpy.ndarray` + Array of instrumental variables. + Default is ``None``. + + cluster_vars : None or :class:`numpy.ndarray` + Array of cluster variables. + Default is ``None``. + + use_other_treat_as_covariate : bool + Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates. + Default is ``True``. + + force_all_x_finite : bool or str + Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``. + Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are + allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed). 
+            Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
+            for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
+            in the covariates ``x``.
+            Default is ``True``.
+
+        force_all_d_finite : bool
+            Indicates whether to raise an error on infinite values and / or missings in the treatment variables ``d``.
+            Default is ``True``.
+
+        Examples
+        --------
+        >>> from doubleml import DoubleMLDIDData
+        >>> from doubleml.did.datasets import make_did_SZ2020
+        >>> (x, y, d, t) = make_did_SZ2020(return_type='array')
+        >>> obj_dml_data_from_array = DoubleMLDIDData.from_arrays(x, y, d, t=t)
+        """
+        # Prepare time variable
+
+        if t is None:
+            t_col = None
+        else:
+            t = column_or_1d(t, warn=True)
+            check_consistent_length(x, y, d, t)
+            t_col = "t"
+
+        # Create base data using parent class method
+        base_data = DoubleMLData.from_arrays(
+            x, y, d, z, cluster_vars, use_other_treat_as_covariate, force_all_x_finite, force_all_d_finite
+        )
+
+        # Add the time variable to the DataFrame only when it was provided; an
+        # unconditional concat with pd.DataFrame(None, columns=[None]) would append
+        # a spurious all-NaN column named None when t is None.
+        if t is not None:
+            data = pd.concat((base_data.data, pd.DataFrame(t, columns=[t_col])), axis=1)
+        else:
+            data = base_data.data
+
+        return cls(
+            data,
+            base_data.y_col,
+            base_data.d_cols,
+            base_data.x_cols,
+            base_data.z_cols,
+            t_col,
+            base_data.cluster_cols,
+            base_data.use_other_treat_as_covariate,
+            base_data.force_all_x_finite,
+            base_data.force_all_d_finite,
+        )
+
+    @property
+    def t_col(self):
+        """
+        The time variable.
+        """
+        return self._t_col
+
+    @t_col.setter
+    def t_col(self, value):
+        reset_value = hasattr(self, "_t_col")
+        if value is not None:
+            if not isinstance(value, str):
+                raise TypeError(
+                    "The time variable t_col must be of str type (or None). "
+                    f"{str(value)} of type {str(type(value))} was passed."
+                )
+            if value not in self.all_variables:
+                raise ValueError(f"Invalid time variable t_col. 
{value} is no data column.") + self._t_col = value + else: + self._t_col = None + if reset_value: + self._check_disjoint_sets() + self._set_y_z_t() + + @property + def t(self): + """ + Array of time variable. + """ + if self.t_col is not None: + return self._t.values + else: + return None + + def _get_optional_col_sets(self): + """Get optional column sets including time column.""" + base_optional_col_sets = super()._get_optional_col_sets() + if self.t_col is not None: + t_col_set = {self.t_col} + return [t_col_set] + base_optional_col_sets + return base_optional_col_sets + + def _check_disjoint_sets(self): + """Check that time column doesn't overlap with other variables.""" + # Apply standard checks from parent class + super()._check_disjoint_sets() + if self.t_col is not None: + self._check_disjoint_sets_t_col() + + def _check_disjoint_sets_t_col(self): + """Check that time column is disjoint from other variable sets.""" + t_col_set = {self.t_col} + y_col_set = {self.y_col} + x_cols_set = set(self.x_cols) + d_cols_set = set(self.d_cols) + z_cols_set = set(self.z_cols or []) + cluster_cols_set = set(self.cluster_cols or []) + + t_checks_args = [ + (y_col_set, "outcome variable", "``y_col``"), + (d_cols_set, "treatment variable", "``d_cols``"), + (x_cols_set, "covariate", "``x_cols``"), + (z_cols_set, "instrumental variable", "``z_cols``"), + (cluster_cols_set, "cluster variable(s)", "``cluster_cols``"), + ] + for set1, name, argument in t_checks_args: + self._check_disjoint( + set1=set1, + name1=name, + arg1=argument, + set2=t_col_set, + name2="time variable", + arg2="``t_col``", + ) + + def _set_time_var(self): + """Set the time variable array.""" + if hasattr(self, "_data") and self.t_col in self.data.columns: + self._t = self.data.loc[:, self.t_col] + + def _set_y_z_t(self): + def _set_attr(col): + if col is None: + return None + assert_all_finite(self.data.loc[:, col]) + return self.data.loc[:, col] + + self._y = _set_attr(self.y_col) + self._z = 
_set_attr(self.z_cols) + self._t = _set_attr(self.t_col) + + def __str__(self): + """String representation.""" + data_summary = self._data_summary_str() + buf = io.StringIO() + print("================== DoubleMLDIDData Object ==================", file=buf) + print(f"Time variable: {self.t_col}", file=buf) + print(data_summary, file=buf) + return buf.getvalue() diff --git a/doubleml/data/panel_data.py b/doubleml/data/panel_data.py index 4ba659ce..22aad0f7 100644 --- a/doubleml/data/panel_data.py +++ b/doubleml/data/panel_data.py @@ -93,6 +93,10 @@ def __init__( self._datetime_unit = _is_valid_datetime_unit(datetime_unit) self._set_id_var() + # Set time column before calling parent constructor + self.t_col = t_col + + # Call parent constructor DoubleMLData.__init__( self, data=data, @@ -100,14 +104,17 @@ def __init__( d_cols=d_cols, x_cols=x_cols, z_cols=z_cols, - t_col=t_col, - s_col=None, use_other_treat_as_covariate=use_other_treat_as_covariate, force_all_x_finite=force_all_x_finite, force_all_d_finite=False, ) + # reset index to ensure a simple RangeIndex self.data.reset_index(drop=True, inplace=True) + + # Set time variable array after data is loaded + self._set_time_var() + if self.n_treat != 1: raise ValueError("Only one treatment column is allowed for panel data.") @@ -174,7 +181,7 @@ def t(self): """ Array of time variable. 
""" - if pd.api.types.is_datetime64_any_dtype(self._d): + if pd.api.types.is_datetime64_any_dtype(self._t): return self._t.values.astype(f"datetime64[{self.datetime_unit}]") else: return self._t.values @@ -230,6 +237,8 @@ def g_col(self): @DoubleMLData.d_cols.setter def d_cols(self, value): + if isinstance(value, str): + value = [value] super(self.__class__, self.__class__).d_cols.__set__(self, value) if hasattr(self, "_g_values"): self._g_values = np.sort(np.unique(self.d)) # update unique values of g @@ -248,11 +257,28 @@ def n_groups(self): """ return len(self.g_values) - @DoubleMLData.t_col.setter + @property + def t_col(self): + """ + The time variable. + """ + return self._t_col + + @t_col.setter def t_col(self, value): if value is None: raise TypeError("Invalid time variable t_col. Time variable required for panel data.") - super(self.__class__, self.__class__).t_col.__set__(self, value) + if not isinstance(value, str): + raise TypeError( + "The time variable t_col must be of str type. " f"{str(value)} of type {str(type(value))} was passed." + ) + # Check if data exists (during initialization it might not) + if hasattr(self, "_data") and value not in self.all_variables: + raise ValueError(f"Invalid time variable t_col. 
{value} is no data column.") + self._t_col = value + # Update time variable array if data is already loaded + if hasattr(self, "_data"): + self._set_time_var() if hasattr(self, "_t_values"): self._t_values = np.sort(np.unique(self.t)) # update unique values of t @@ -273,17 +299,16 @@ def n_t_periods(self): def _get_optional_col_sets(self): base_optional_col_sets = super()._get_optional_col_sets() id_col_set = {self.id_col} - return [id_col_set] + base_optional_col_sets + t_col_set = {self.t_col} + return [id_col_set, t_col_set] + base_optional_col_sets def _check_disjoint_sets(self): # apply the standard checks from the DoubleMLData class super(DoubleMLPanelData, self)._check_disjoint_sets() self._check_disjoint_sets_id_col() + self._check_disjoint_sets_t_col() def _check_disjoint_sets_id_col(self): - # apply the standard checks from the DoubleMLData class - super(DoubleMLPanelData, self)._check_disjoint_sets() - # special checks for the additional id variable (and the time variable) id_col_set = {self.id_col} y_col_set = {self.y_col} @@ -311,7 +336,38 @@ def _check_disjoint_sets_id_col(self): arg2="``id_col``", ) + def _check_disjoint_sets_t_col(self): + """Check that time column is disjoint from other variable sets.""" + t_col_set = {self.t_col} + y_col_set = {self.y_col} + x_cols_set = set(self.x_cols) + d_cols_set = set(self.d_cols) + z_cols_set = set(self.z_cols or []) + id_col_set = {self.id_col} + + t_checks_args = [ + (y_col_set, "outcome variable", "``y_col``"), + (d_cols_set, "treatment variable", "``d_cols``"), + (x_cols_set, "covariate", "``x_cols``"), + (z_cols_set, "instrumental variable", "``z_cols``"), + (id_col_set, "identifier variable", "``id_col``"), + ] + for set1, name, argument in t_checks_args: + self._check_disjoint( + set1=set1, + name1=name, + arg1=argument, + set2=t_col_set, + name2="time variable", + arg2="``t_col``", + ) + def _set_id_var(self): assert_all_finite(self.data.loc[:, self.id_col]) self._id_var = self.data.loc[:, 
self.id_col] self._id_var_unique = np.unique(self._id_var.values) + + def _set_time_var(self): + """Set the time variable array.""" + if hasattr(self, "_data") and self.t_col in self.data.columns: + self._t = self.data.loc[:, self.t_col] diff --git a/doubleml/data/rdd_data.py b/doubleml/data/rdd_data.py new file mode 100644 index 00000000..6bf4a830 --- /dev/null +++ b/doubleml/data/rdd_data.py @@ -0,0 +1,274 @@ +import io + +import pandas as pd +from sklearn.utils.validation import check_array + +from doubleml.data.base_data import DoubleMLData +from doubleml.utils._estimation import _assure_2d_array + + +class DoubleMLRDDData(DoubleMLData): + """Double machine learning data-backend for Regression Discontinuity Design models. + + :class:`DoubleMLRDDData` objects can be initialized from + :class:`pandas.DataFrame`'s as well as :class:`numpy.ndarray`'s. + + Parameters + ---------- + data : :class:`pandas.DataFrame` + The data. + + y_col : str + The outcome variable. + + d_cols : str or list + The treatment variable(s). + + score_col : str + The score/running variable for RDD models. + + x_cols : None, str or list + The covariates. + If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor + treatment variables ``d_cols``, nor instrumental variables ``z_cols``, nor score variable ``score_col`` are + used as covariates. + Default is ``None``. + + z_cols : None, str or list + The instrumental variable(s). + Default is ``None``. + + cluster_cols : None, str or list + The cluster variable(s). + Default is ``None``. + + use_other_treat_as_covariate : bool + Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates. + Default is ``True``. + + force_all_x_finite : bool or str + Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``. 
+ Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are + allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed). + Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used + for the nuisance functions are capable to provide valid predictions with missings and / or infinite values + in the covariates ``x``. + Default is ``True``. + + force_all_d_finite : bool + Indicates whether to raise an error on infinite values and / or missings in the treatment variables ``d``. + Default is ``True``. + + Examples + -------- + >>> from doubleml import DoubleMLRDDData + >>> from doubleml.rdd.datasets import make_rdd_data + >>> # initialization from pandas.DataFrame + >>> df = make_rdd_data(return_type='DataFrame') + >>> obj_dml_data_from_df = DoubleMLRDDData(df, 'y', 'd', 's') + >>> # initialization from np.ndarray + >>> (x, y, d, s) = make_rdd_data(return_type='array') + >>> obj_dml_data_from_array = DoubleMLRDDData.from_arrays(x, y, d, s=s) + """ + + def __init__( + self, + data, + y_col, + d_cols, + score_col, + x_cols=None, + z_cols=None, + cluster_cols=None, + use_other_treat_as_covariate=True, + force_all_x_finite=True, + force_all_d_finite=True, + ): + # Set score column before calling parent constructor + self.score_col = score_col + + # Call parent constructor + super().__init__( + data=data, + y_col=y_col, + d_cols=d_cols, + x_cols=x_cols, + z_cols=z_cols, + cluster_cols=cluster_cols, + use_other_treat_as_covariate=use_other_treat_as_covariate, + force_all_x_finite=force_all_x_finite, + force_all_d_finite=force_all_d_finite, + ) + + # Set score variable array after data is loaded + self._set_score_var() + + @classmethod + def from_arrays( + cls, + x, + y, + d, + score, + z=None, + cluster_vars=None, + use_other_treat_as_covariate=True, + force_all_x_finite=True, + force_all_d_finite=True, + ): + """ + Initialize 
:class:`DoubleMLRDDData` object from :class:`numpy.ndarray`'s. + + Parameters + ---------- + x : :class:`numpy.ndarray` + Array of covariates. + + y : :class:`numpy.ndarray` + Array of the outcome variable. + + d : :class:`numpy.ndarray` + Array of treatment variables. + + score : :class:`numpy.ndarray` + Array of the score/running variable for RDD models. + + z : None or :class:`numpy.ndarray` + Array of instrumental variables. + Default is ``None``. + + cluster_vars : None or :class:`numpy.ndarray` + Array of cluster variables. + Default is ``None``. + + use_other_treat_as_covariate : bool + Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates. + Default is ``True``. + + force_all_x_finite : bool or str + Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``. + Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are + allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed). + Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used + for the nuisance functions are capable to provide valid predictions with missings and / or infinite values + in the covariates ``x``. + Default is ``True``. + + force_all_d_finite : bool + Indicates whether to raise an error on infinite values and / or missings in the treatment variables ``d``. + Default is ``True``. 
+ + Examples + -------- + >>> from doubleml import DoubleMLRDDData + >>> from doubleml.rdd.datasets import make_rdd_data + >>> (x, y, d, s) = make_rdd_data(return_type='array') + >>> obj_dml_data_from_array = DoubleMLRDDData.from_arrays(x, y, d, s=s) + """ + # Prepare score variable + score = check_array(score, ensure_2d=False, allow_nd=False) + score = _assure_2d_array(score) + if score.shape[1] != 1: + raise ValueError("score must be a single column.") + score_col = "score" + + # Create base data using parent class method + base_data = DoubleMLData.from_arrays( + x, y, d, z, cluster_vars, use_other_treat_as_covariate, force_all_x_finite, force_all_d_finite + ) + + # Add score variable to the DataFrame + data = pd.concat((base_data.data, pd.DataFrame(score, columns=[score_col])), axis=1) + + return cls( + data, + base_data.y_col, + base_data.d_cols, + score_col, + base_data.x_cols, + base_data.z_cols, + base_data.cluster_cols, + base_data.use_other_treat_as_covariate, + base_data.force_all_x_finite, + base_data.force_all_d_finite, + ) + + @property + def score_col(self): + """ + The score/running variable. + """ + return self._score_col + + @score_col.setter + def score_col(self, value): + if not isinstance(value, str): + raise TypeError( + "The score variable score_col must be of str type. " f"{str(value)} of type {str(type(value))} was passed." + ) + # Check if data exists (during initialization it might not) + if hasattr(self, "_data") and value not in self.all_variables: + raise ValueError("Invalid score variable score_col. The score variable is no data column.") + self._score_col = value + # Update score variable array if data is already loaded + if hasattr(self, "_data"): + self._set_score_var() + + @property + def score(self): + """ + Array of score/running variable. 
+ """ + return self._score.values + + def _get_optional_col_sets(self): + """Get optional column sets including score column.""" + base_optional_col_sets = super()._get_optional_col_sets() + score_col_set = {self.score_col} + return [score_col_set] + base_optional_col_sets + + def _check_disjoint_sets(self): + """Check that score column doesn't overlap with other variables.""" + # Apply standard checks from parent class + super()._check_disjoint_sets() + self._check_disjoint_sets_score_col() + + def _check_disjoint_sets_score_col(self): + """Check that score column is disjoint from other variable sets.""" + score_col_set = {self.score_col} + y_col_set = {self.y_col} + x_cols_set = set(self.x_cols) + d_cols_set = set(self.d_cols) + z_cols_set = set(self.z_cols or []) + cluster_cols_set = set(self.cluster_cols or []) + + s_checks_args = [ + (y_col_set, "outcome variable", "``y_col``"), + (d_cols_set, "treatment variable", "``d_cols``"), + (x_cols_set, "covariate", "``x_cols``"), + (z_cols_set, "instrumental variable", "``z_cols``"), + (cluster_cols_set, "cluster variable(s)", "``cluster_cols``"), + ] + for set1, name, argument in s_checks_args: + self._check_disjoint( + set1=set1, + name1=name, + arg1=argument, + set2=score_col_set, + name2="score variable", + arg2="``score_col``", + ) + + def _set_score_var(self): + """Set the score variable array.""" + if hasattr(self, "_data") and self.score_col in self.data.columns: + self._score = self.data.loc[:, self.score_col] + + def __str__(self): + """String representation.""" + data_summary = self._data_summary_str() + buf = io.StringIO() + print("================== DoubleMLRDDData Object ==================", file=buf) + print(f"Score variable: {self.score_col}", file=buf) + print(data_summary, file=buf) + return buf.getvalue() diff --git a/doubleml/data/ssm_data.py b/doubleml/data/ssm_data.py new file mode 100644 index 00000000..2785821a --- /dev/null +++ b/doubleml/data/ssm_data.py @@ -0,0 +1,318 @@ +import io + +import 
pandas as pd +from sklearn.utils import assert_all_finite +from sklearn.utils.validation import check_array + +from doubleml.data.base_data import DoubleMLData +from doubleml.utils._estimation import _assure_2d_array + + +class DoubleMLSSMData(DoubleMLData): + """Double machine learning data-backend for Sample Selection Models. + + :class:`DoubleMLSSMData` objects can be initialized from + :class:`pandas.DataFrame`'s as well as :class:`numpy.ndarray`'s. + + Parameters + ---------- + data : :class:`pandas.DataFrame` + The data. + + y_col : str + The outcome variable. + + d_cols : str or list + The treatment variable(s). + + s_col : str + The selection variable for SSM models. + + x_cols : None, str or list + The covariates. + If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor + treatment variables ``d_cols``, nor instrumental variables ``z_cols``, nor selection variable ``s_col`` + are used as covariates. + Default is ``None``. + + z_cols : None, str or list + The instrumental variable(s). + Default is ``None``. + + cluster_cols : None, str or list + The cluster variable(s). + Default is ``None``. + + use_other_treat_as_covariate : bool + Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates. + Default is ``True``. + + force_all_x_finite : bool or str + Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``. + Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are + allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed). + Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used + for the nuisance functions are capable to provide valid predictions with missings and / or infinite values + in the covariates ``x``. + Default is ``True``. 
+ + force_all_d_finite : bool + Indicates whether to raise an error on infinite values and / or missings in the treatment variables ``d``. + Default is ``True``. + + Examples + -------- + >>> from doubleml import DoubleMLSSMData + >>> from doubleml.irm.datasets import make_ssm_data + >>> # initialization from pandas.DataFrame + >>> df = make_ssm_data(return_type='DataFrame') + >>> obj_dml_data_from_df = DoubleMLSSMData(df, 'y', 'd', 's') + >>> # initialization from np.ndarray + >>> (x, y, d, s) = make_ssm_data(return_type='array') + >>> obj_dml_data_from_array = DoubleMLSSMData.from_arrays(x, y, d, s=s) + """ + + def __init__( + self, + data, + y_col, + d_cols, + x_cols=None, + z_cols=None, + s_col=None, + cluster_cols=None, + use_other_treat_as_covariate=True, + force_all_x_finite=True, + force_all_d_finite=True, + ): + # Initialize _s_col to None first to avoid AttributeError during parent init + self._s_col = None + + # Store whether x_cols was originally None to reset it later + x_cols_was_none = x_cols is None + + # Call parent constructor + super().__init__( + data=data, + y_col=y_col, + d_cols=d_cols, + x_cols=x_cols, + z_cols=z_cols, + cluster_cols=cluster_cols, + use_other_treat_as_covariate=use_other_treat_as_covariate, + force_all_x_finite=force_all_x_finite, + force_all_d_finite=force_all_d_finite, + ) + + # Set selection column directly to avoid triggering checks during init + if s_col is not None: + if not isinstance(s_col, str): + raise TypeError( + "The selection variable s_col must be of str type (or None). " + f"{str(s_col)} of type {str(type(s_col))} was passed." + ) + if s_col not in self.all_variables: + raise ValueError(f"Invalid selection variable s_col. 
{s_col} is no data column.") + self._s_col = s_col + + # If x_cols was originally None, reset it to exclude the selection column + if x_cols_was_none and s_col is not None: + self.x_cols = None + + # Now run the checks and set variables + if s_col is not None: + self._check_disjoint_sets() + self._set_y_z_s() + + # Set selection variable array after data is loaded + self._set_selection_var() + + @classmethod + def from_arrays( + cls, + x, + y, + d, + z=None, + s=None, + cluster_vars=None, + use_other_treat_as_covariate=True, + force_all_x_finite=True, + force_all_d_finite=True, + ): + """ + Initialize :class:`DoubleMLSSMData` object from :class:`numpy.ndarray`'s. + + Parameters + ---------- + x : :class:`numpy.ndarray` + Array of covariates. + + y : :class:`numpy.ndarray` + Array of the outcome variable. + + d : :class:`numpy.ndarray` + Array of treatment variables. + + s : :class:`numpy.ndarray` + Array of the selection variable for SSM models. + + z : None or :class:`numpy.ndarray` + Array of instrumental variables. + Default is ``None``. + + cluster_vars : None or :class:`numpy.ndarray` + Array of cluster variables. + Default is ``None``. + + use_other_treat_as_covariate : bool + Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates. + Default is ``True``. + + force_all_x_finite : bool or str + Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``. + Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are + allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed). + Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used + for the nuisance functions are capable to provide valid predictions with missings and / or infinite values + in the covariates ``x``. + Default is ``True``. 
+ + force_all_d_finite : bool + Indicates whether to raise an error on infinite values and / or missings in the treatment variables ``d``. + Default is ``True``. + + Examples + -------- + >>> from doubleml import DoubleMLSSMData + >>> from doubleml.irm.datasets import make_ssm_data + >>> (x, y, d, s) = make_ssm_data(return_type='array') + >>> obj_dml_data_from_array = DoubleMLSSMData.from_arrays(x, y, d, s=s) + """ + # Prepare selection variable + s = check_array(s, ensure_2d=False, allow_nd=False) + s = _assure_2d_array(s) + if s.shape[1] != 1: + raise ValueError("s must be a single column.") + s_col = "s" + + # Create base data using parent class method + base_data = DoubleMLData.from_arrays( + x, y, d, z, cluster_vars, use_other_treat_as_covariate, force_all_x_finite, force_all_d_finite + ) + + # Add selection variable to the DataFrame + data = pd.concat((base_data.data, pd.DataFrame(s, columns=[s_col])), axis=1) + + return cls( + data, + base_data.y_col, + base_data.d_cols, + base_data.x_cols, + base_data.z_cols, + s_col, + base_data.cluster_cols, + base_data.use_other_treat_as_covariate, + base_data.force_all_x_finite, + base_data.force_all_d_finite, + ) + + @property + def s(self): + """ + Array of score or selection variable. + """ + if self.s_col is not None: + return self._s.values + else: + return None + + @property + def s_col(self): + """ + The selection variable. + """ + return self._s_col + + @s_col.setter + def s_col(self, value): + reset_value = hasattr(self, "_s_col") + if value is not None: + if not isinstance(value, str): + raise TypeError( + "The selection variable s_col must be of str type (or None). " + f"{str(value)} of type {str(type(value))} was passed." + ) + if value not in self.all_variables: + raise ValueError(f"Invalid selection variable s_col. 
{value} is no data column.") + self._s_col = value + else: + self._s_col = None + if reset_value: + self._check_disjoint_sets() + self._set_y_z_s() + + def _get_optional_col_sets(self): + """Get optional column sets including selection column.""" + base_optional_col_sets = super()._get_optional_col_sets() + if self.s_col is not None: + s_col_set = {self.s_col} + return [s_col_set] + base_optional_col_sets + return base_optional_col_sets + + def _check_disjoint_sets(self): + """Check that selection column doesn't overlap with other variables.""" + # Apply standard checks from parent class + super()._check_disjoint_sets() + self._check_disjoint_sets_s_col() + + def _check_disjoint_sets_s_col(self): + """Check that selection column is disjoint from other variable sets.""" + s_col_set = {self.s_col} + y_col_set = {self.y_col} + x_cols_set = set(self.x_cols) + d_cols_set = set(self.d_cols) + z_cols_set = set(self.z_cols or []) + cluster_cols_set = set(self.cluster_cols or []) + + s_checks_args = [ + (y_col_set, "outcome variable", "``y_col``"), + (d_cols_set, "treatment variable", "``d_cols``"), + (x_cols_set, "covariate", "``x_cols``"), + (z_cols_set, "instrumental variable", "``z_cols``"), + (cluster_cols_set, "cluster variable(s)", "``cluster_cols``"), + ] + for set1, name, argument in s_checks_args: + self._check_disjoint( + set1=set1, + name1=name, + arg1=argument, + set2=s_col_set, + name2="selection variable", + arg2="``s_col``", + ) + + def _set_selection_var(self): + """Set the selection variable array.""" + if hasattr(self, "_data") and self.s_col in self.data.columns: + self._s = self.data.loc[:, [self.s_col]].squeeze() + + def _set_y_z_s(self): + def _set_attr(col): + if col is None: + return None + assert_all_finite(self.data.loc[:, col]) + return self.data.loc[:, col] + + self._y = _set_attr(self.y_col) + self._z = _set_attr(self.z_cols) + self._s = _set_attr(self.s_col) + + def __str__(self): + """String representation.""" + data_summary = 
self._data_summary_str() + buf = io.StringIO() + print("================== DoubleMLSSMData Object ==================", file=buf) + print(f"Selection variable: {self.s_col}", file=buf) + print(data_summary, file=buf) + return buf.getvalue() diff --git a/doubleml/data/tests/conftest.py b/doubleml/data/tests/conftest.py index 6960b58a..fcefabce 100644 --- a/doubleml/data/tests/conftest.py +++ b/doubleml/data/tests/conftest.py @@ -2,7 +2,8 @@ import pandas as pd import pytest -from doubleml.datasets import make_irm_data, make_plr_turrell2018 +from doubleml.irm.datasets import make_irm_data +from doubleml.plm.datasets import make_plr_turrell2018 @pytest.fixture(scope="session", params=[(500, 10), (1000, 20), (1000, 100)]) diff --git a/doubleml/data/tests/test_cluster_data.py b/doubleml/data/tests/test_cluster_data.py index e95dfa03..bbb7d97f 100644 --- a/doubleml/data/tests/test_cluster_data.py +++ b/doubleml/data/tests/test_cluster_data.py @@ -2,44 +2,44 @@ import pandas as pd import pytest -from doubleml import DoubleMLClusterData -from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021, make_plr_CCDDHNR2018 +from doubleml import DoubleMLData, DoubleMLDIDData, DoubleMLSSMData +from doubleml.plm.datasets import make_pliv_multiway_cluster_CKMS2021, make_plr_CCDDHNR2018 @pytest.mark.ci def test_obj_vs_from_arrays(): np.random.seed(3141) dml_data = make_pliv_multiway_cluster_CKMS2021(N=10, M=10) - dml_data_from_array = DoubleMLClusterData.from_arrays( - dml_data.data[dml_data.x_cols], - dml_data.data[dml_data.y_col], - dml_data.data[dml_data.d_cols], - dml_data.data[dml_data.cluster_cols], - dml_data.data[dml_data.z_cols], + dml_data_from_array = DoubleMLData.from_arrays( + x=dml_data.data[dml_data.x_cols], + y=dml_data.data[dml_data.y_col], + d=dml_data.data[dml_data.d_cols], + cluster_vars=dml_data.data[dml_data.cluster_cols], + z=dml_data.data[dml_data.z_cols], ) df = dml_data.data.copy() df.rename( columns={"cluster_var_i": "cluster_var1", "cluster_var_j": 
"cluster_var2", "Y": "y", "D": "d", "Z": "z"}, inplace=True ) - assert dml_data_from_array.data.equals(df) + assert dml_data_from_array.data[list(df.columns)].equals(df) # with a single cluster variable - dml_data_from_array = DoubleMLClusterData.from_arrays( - dml_data.data[dml_data.x_cols], - dml_data.data[dml_data.y_col], - dml_data.data[dml_data.d_cols], - dml_data.data[dml_data.cluster_cols[1]], - dml_data.data[dml_data.z_cols], + dml_data_from_array = DoubleMLData.from_arrays( + x=dml_data.data[dml_data.x_cols], + y=dml_data.data[dml_data.y_col], + d=dml_data.data[dml_data.d_cols], + cluster_vars=dml_data.data[dml_data.cluster_cols[1]], + z=dml_data.data[dml_data.z_cols], ) df = dml_data.data.copy().drop(columns="cluster_var_i") df.rename(columns={"cluster_var_j": "cluster_var", "Y": "y", "D": "d", "Z": "z"}, inplace=True) - assert dml_data_from_array.data.equals(df) + assert dml_data_from_array.data[list(df.columns)].equals(df) @pytest.mark.ci def test_x_cols_setter_defaults_w_cluster(): df = pd.DataFrame(np.tile(np.arange(6), (6, 1)), columns=["yy", "dd", "xx1", "xx2", "xx3", "cluster1"]) - dml_data = DoubleMLClusterData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1") + dml_data = DoubleMLData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1") assert dml_data.x_cols == ["xx1", "xx2", "xx3"] dml_data.x_cols = ["xx1", "xx3"] assert dml_data.x_cols == ["xx1", "xx3"] @@ -48,37 +48,27 @@ def test_x_cols_setter_defaults_w_cluster(): # with instrument df = pd.DataFrame(np.tile(np.arange(6), (6, 1)), columns=["yy", "dd", "xx1", "xx2", "z", "cluster1"]) - dml_data = DoubleMLClusterData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", z_cols="z") + dml_data = DoubleMLData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", z_cols="z") assert dml_data.x_cols == ["xx1", "xx2"] # without instrument and with time df = pd.DataFrame(np.tile(np.arange(6), (6, 1)), columns=["yy", "dd", "xx1", "xx2", "tt", "cluster1"]) - dml_data = DoubleMLClusterData(df, 
y_col="yy", d_cols="dd", cluster_cols="cluster1", t_col="tt") + dml_data = DoubleMLDIDData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", t_col="tt") assert dml_data.x_cols == ["xx1", "xx2"] # with instrument and with time df = pd.DataFrame(np.tile(np.arange(7), (6, 1)), columns=["yy", "dd", "xx1", "xx2", "zz", "tt", "cluster1"]) - dml_data = DoubleMLClusterData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", z_cols="zz", t_col="tt") + dml_data = DoubleMLDIDData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", z_cols="zz", t_col="tt") assert dml_data.x_cols == ["xx1", "xx2"] # without instrument and with selection df = pd.DataFrame(np.tile(np.arange(6), (6, 1)), columns=["yy", "dd", "xx1", "xx2", "ss", "cluster1"]) - dml_data = DoubleMLClusterData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", s_col="ss") + dml_data = DoubleMLSSMData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", s_col="ss") assert dml_data.x_cols == ["xx1", "xx2"] # with instrument and with selection df = pd.DataFrame(np.tile(np.arange(7), (6, 1)), columns=["yy", "dd", "xx1", "xx2", "zz", "ss", "cluster1"]) - dml_data = DoubleMLClusterData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", z_cols="zz", s_col="ss") - assert dml_data.x_cols == ["xx1", "xx2"] - - # without instrument with time with selection - df = pd.DataFrame(np.tile(np.arange(7), (6, 1)), columns=["yy", "dd", "xx1", "xx2", "tt", "ss", "cluster1"]) - dml_data = DoubleMLClusterData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", t_col="tt", s_col="ss") - assert dml_data.x_cols == ["xx1", "xx2"] - - # with instrument with time with selection - df = pd.DataFrame(np.tile(np.arange(8), (6, 1)), columns=["yy", "dd", "xx1", "xx2", "zz", "tt", "ss", "cluster1"]) - dml_data = DoubleMLClusterData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", z_cols="zz", t_col="tt", s_col="ss") + dml_data = DoubleMLSSMData(df, y_col="yy", d_cols="dd", cluster_cols="cluster1", z_cols="zz", s_col="ss") assert 
dml_data.x_cols == ["xx1", "xx2"] @@ -88,7 +78,7 @@ def test_cluster_cols_setter(): dml_data = make_plr_CCDDHNR2018(n_obs=100) df = dml_data.data.copy().iloc[:, :10] df.columns = [f"X{i + 1}" for i in np.arange(7)] + ["y", "d1", "d2"] - dml_data = DoubleMLClusterData( + dml_data = DoubleMLData( df, "y", ["d1", "d2"], cluster_cols=[f"X{i + 1}" for i in [5, 6]], x_cols=[f"X{i + 1}" for i in np.arange(5)] ) @@ -107,7 +97,10 @@ def test_cluster_cols_setter(): with pytest.raises(ValueError, match=msg): dml_data.cluster_cols = "X13" - msg = r"The cluster variable\(s\) cluster_cols must be of str or list type. " "5 of type was passed." + msg = ( + r"The cluster variable\(s\) cluster_cols must be of str or list type \(or None\)\. " + "5 of type was passed." + ) with pytest.raises(TypeError, match=msg): dml_data.cluster_cols = 5 @@ -129,39 +122,39 @@ def test_disjoint_sets(): r"and cluster variable\(s\) \(``cluster_cols``\)." ) with pytest.raises(ValueError, match=msg): - _ = DoubleMLClusterData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], cluster_cols="yy") + _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], cluster_cols="yy") msg = ( r"At least one variable/column is set as treatment variable \(``d_cols``\) " r"and cluster variable\(s\) \(``cluster_cols``\)." ) with pytest.raises(ValueError, match=msg): - _ = DoubleMLClusterData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], cluster_cols="dd1") + _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], cluster_cols="dd1") msg = ( r"At least one variable/column is set as covariate \(``x_cols``\) " r"and cluster variable\(s\) \(``cluster_cols``\)." 
) with pytest.raises(ValueError, match=msg): - _ = DoubleMLClusterData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], cluster_cols="xx2") + _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], cluster_cols="xx2") msg = ( r"At least one variable/column is set as instrumental variable \(``z_cols``\) " r"and cluster variable\(s\) \(``cluster_cols``\)." ) with pytest.raises(ValueError, match=msg): - _ = DoubleMLClusterData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1"], z_cols=["xx2"], cluster_cols="xx2") + _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1"], z_cols=["xx2"], cluster_cols="xx2") msg = ( - r"At least one variable/column is set as time variable \(``t_col``\) " - r"and cluster variable\(s\) \(``cluster_cols``\)." + r"At least one variable/column is set as cluster variable\(s\) \(``cluster_cols``\) " + r"and time variable \(``t_col``\)." ) with pytest.raises(ValueError, match=msg): - _ = DoubleMLClusterData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1"], t_col="xx2", cluster_cols="xx2") + _ = DoubleMLDIDData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1"], t_col="xx2", cluster_cols="xx2") msg = ( - r"At least one variable/column is set as score or selection variable \(``s_col``\) " - r"and cluster variable\(s\) \(``cluster_cols``\)." + r"At least one variable/column is set as cluster variable\(s\) \(``cluster_cols``\) " + r"and selection variable \(``s_col``\)." ) with pytest.raises(ValueError, match=msg): - _ = DoubleMLClusterData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1"], s_col="xx2", cluster_cols="xx2") + _ = DoubleMLSSMData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1"], s_col="xx2", cluster_cols="xx2") @pytest.mark.ci @@ -171,13 +164,13 @@ def test_duplicates(): msg = r"Invalid cluster variable\(s\) cluster_cols: Contains duplicate values." 
with pytest.raises(ValueError, match=msg): - _ = DoubleMLClusterData(dml_cluster_data.data, y_col="y", d_cols=["d"], cluster_cols=["X3", "X2", "X3"]) + _ = DoubleMLData(dml_cluster_data.data, y_col="Y", d_cols=["D"], cluster_cols=["X3", "X2", "X3"]) with pytest.raises(ValueError, match=msg): dml_cluster_data.cluster_cols = ["X3", "X2", "X3"] msg = "Invalid pd.DataFrame: Contains duplicate column names." with pytest.raises(ValueError, match=msg): - _ = DoubleMLClusterData( + _ = DoubleMLData( pd.DataFrame(np.zeros((100, 5)), columns=["y", "d", "X3", "X2", "y"]), y_col="y", d_cols=["d"], cluster_cols=["X2"] ) @@ -186,7 +179,7 @@ def test_duplicates(): def test_dml_datatype(): data_array = np.zeros((100, 10)) with pytest.raises(TypeError): - _ = DoubleMLClusterData(data_array, y_col="y", d_cols=["d"], cluster_cols=["X3", "X2"]) + _ = DoubleMLData(data_array, y_col="y", d_cols=["d"], cluster_cols=["X3", "X2"]) @pytest.mark.ci @@ -198,7 +191,7 @@ def test_cluster_data_str(): dml_str = str(dml_data) # Check that all important sections are present in the string - assert "================== DoubleMLClusterData Object ==================" in dml_str + assert "================== DoubleMLData Object ==================" in dml_str assert "------------------ Data summary ------------------" in dml_str assert "------------------ DataFrame info ------------------" in dml_str @@ -215,16 +208,14 @@ def test_cluster_data_str(): df["time_var"] = 1 df["score_var"] = 0.5 - dml_data_with_optional = DoubleMLClusterData( + dml_data_with_optional = DoubleMLDIDData( data=df, y_col="Y", d_cols="D", cluster_cols=["cluster_var_i", "cluster_var_j"], z_cols="Z", t_col="time_var", - s_col="score_var", ) dml_str_optional = str(dml_data_with_optional) assert "Time variable: time_var" in dml_str_optional - assert "Score/Selection variable: score_var" in dml_str_optional diff --git a/doubleml/data/tests/test_dml_data.py b/doubleml/data/tests/test_dml_data.py index 7cf394b5..4890ac7a 100644 --- 
a/doubleml/data/tests/test_dml_data.py +++ b/doubleml/data/tests/test_dml_data.py @@ -3,15 +3,15 @@ import pytest from sklearn.linear_model import Lasso, LogisticRegression -from doubleml import DoubleMLData, DoubleMLDIDCS, DoubleMLPLR, DoubleMLSSM +from doubleml import DoubleMLData, DoubleMLDIDCS, DoubleMLDIDData, DoubleMLPLR, DoubleMLSSM, DoubleMLSSMData from doubleml.data.base_data import DoubleMLBaseData -from doubleml.datasets import ( +from doubleml.did.datasets import make_did_SZ2020 +from doubleml.irm.datasets import make_ssm_data +from doubleml.plm.datasets import ( _make_pliv_data, make_pliv_CHS2015, make_plr_CCDDHNR2018, - make_ssm_data, ) -from doubleml.did.datasets import make_did_SZ2020 class DummyDataClass(DoubleMLBaseData): @@ -66,7 +66,7 @@ def test_obj_vs_from_arrays(): dml_data_from_array = DoubleMLData.from_arrays( dml_data.data[dml_data.x_cols], dml_data.data[dml_data.y_col], dml_data.data[dml_data.d_cols] ) - assert dml_data_from_array.data.equals(dml_data.data) + assert np.array_equal(dml_data_from_array.data, dml_data.data) dml_data = _make_pliv_data(n_obs=100) dml_data_from_array = DoubleMLData.from_arrays( @@ -75,7 +75,7 @@ def test_obj_vs_from_arrays(): dml_data.data[dml_data.d_cols], dml_data.data[dml_data.z_cols], ) - assert dml_data_from_array.data.equals(dml_data.data) + assert np.array_equal(dml_data_from_array.data, dml_data.data) dml_data = make_pliv_CHS2015(n_obs=100, dim_z=5) dml_data_from_array = DoubleMLData.from_arrays( @@ -102,7 +102,7 @@ def test_obj_vs_from_arrays(): assert np.array_equal(dml_data_from_array.data, dml_data.data) dml_data = make_did_SZ2020(n_obs=100, cross_sectional_data=True) - dml_data_from_array = DoubleMLData.from_arrays( + dml_data_from_array = DoubleMLDIDData.from_arrays( x=dml_data.data[dml_data.x_cols], y=dml_data.data[dml_data.y_col], d=dml_data.data[dml_data.d_cols], @@ -113,7 +113,7 @@ def test_obj_vs_from_arrays(): # check with instrument and time variable dml_data = make_did_SZ2020(n_obs=100, 
cross_sectional_data=True) dml_data.data["z"] = dml_data.data["t"] - dml_data_from_array = DoubleMLData.from_arrays( + dml_data_from_array = DoubleMLDIDData.from_arrays( x=dml_data.data[dml_data.x_cols], y=dml_data.data[dml_data.y_col], d=dml_data.data[dml_data.d_cols], @@ -146,14 +146,11 @@ def test_dml_data_no_instr_no_time_no_selection(): dml_data = make_plr_CCDDHNR2018(n_obs=100) assert dml_data.z is None assert dml_data.n_instr == 0 - assert dml_data.t is None x, y, d = make_plr_CCDDHNR2018(n_obs=100, return_type="array") dml_data = DoubleMLData.from_arrays(x, y, d) assert dml_data.z is None assert dml_data.n_instr == 0 - assert dml_data.t is None - assert dml_data.s is None @pytest.mark.ci @@ -193,32 +190,22 @@ def test_x_cols_setter_defaults(): # without instrument with time df = pd.DataFrame(np.tile(np.arange(5), (4, 1)), columns=["yy", "dd", "xx1", "xx2", "tt"]) - dml_data = DoubleMLData(df, y_col="yy", d_cols="dd", t_col="tt") + dml_data = DoubleMLDIDData(df, y_col="yy", d_cols="dd", t_col="tt") assert dml_data.x_cols == ["xx1", "xx2"] # with instrument with time df = pd.DataFrame(np.tile(np.arange(6), (4, 1)), columns=["yy", "dd", "xx1", "xx2", "zz", "tt"]) - dml_data = DoubleMLData(df, y_col="yy", d_cols="dd", z_cols="zz", t_col="tt") + dml_data = DoubleMLDIDData(df, y_col="yy", d_cols="dd", z_cols="zz", t_col="tt") assert dml_data.x_cols == ["xx1", "xx2"] # without instrument with selection df = pd.DataFrame(np.tile(np.arange(5), (4, 1)), columns=["yy", "dd", "xx1", "xx2", "ss"]) - dml_data = DoubleMLData(df, y_col="yy", d_cols="dd", s_col="ss") + dml_data = DoubleMLSSMData(df, y_col="yy", d_cols="dd", s_col="ss") assert dml_data.x_cols == ["xx1", "xx2"] # with instrument with selection df = pd.DataFrame(np.tile(np.arange(6), (4, 1)), columns=["yy", "dd", "xx1", "xx2", "zz", "ss"]) - dml_data = DoubleMLData(df, y_col="yy", d_cols="dd", z_cols="zz", s_col="ss") - assert dml_data.x_cols == ["xx1", "xx2"] - - # with selection and time - df = 
pd.DataFrame(np.tile(np.arange(6), (4, 1)), columns=["yy", "dd", "xx1", "xx2", "tt", "ss"]) - dml_data = DoubleMLData(df, y_col="yy", d_cols="dd", t_col="tt", s_col="ss") - assert dml_data.x_cols == ["xx1", "xx2"] - - # with instrument, selection and time - df = pd.DataFrame(np.tile(np.arange(7), (4, 1)), columns=["yy", "dd", "xx1", "xx2", "zz", "tt", "ss"]) - dml_data = DoubleMLData(df, y_col="yy", d_cols="dd", z_cols="zz", t_col="tt", s_col="ss") + dml_data = DoubleMLSSMData(df, y_col="yy", d_cols="dd", z_cols="zz", s_col="ss") assert dml_data.x_cols == ["xx1", "xx2"] @@ -324,7 +311,7 @@ def test_t_col_setter(): np.random.seed(3141) df = make_did_SZ2020(n_obs=100, cross_sectional_data=True, return_type=pd.DataFrame) df["t_new"] = np.ones(shape=(100,)) - dml_data = DoubleMLData(df, "y", "d", [f"Z{i + 1}" for i in np.arange(4)], t_col="t") + dml_data = DoubleMLDIDData(df, "y", "d", x_cols=[f"Z{i + 1}" for i in np.arange(4)], t_col="t") # check that after changing t_col, the t array gets updated t_comp = dml_data.data["t_new"].values @@ -349,18 +336,18 @@ def test_s_col_setter(): np.random.seed(3141) df = make_ssm_data(n_obs=100, return_type=pd.DataFrame) df["s_new"] = np.ones(shape=(100,)) - dml_data = DoubleMLData(df, "y", "d", [f"X{i + 1}" for i in np.arange(4)], s_col="s") + dml_data = DoubleMLSSMData(df, "y", "d", x_cols=[f"X{i + 1}" for i in np.arange(4)], s_col="s") # check that after changing s_col, the s array gets updated s_comp = dml_data.data["s_new"].values dml_data.s_col = "s_new" assert np.array_equal(dml_data.s, s_comp) - msg = r"Invalid score or selection variable s_col. a13 is no data column." + msg = r"Invalid selection variable s_col. a13 is no data column." with pytest.raises(ValueError, match=msg): dml_data.s_col = "a13" - msg = r"The score or selection variable s_col must be of str type \(or None\). " "5 of type was passed." + msg = r"The selection variable s_col must be of str type \(or None\). " "5 of type was passed." 
with pytest.raises(TypeError, match=msg): dml_data.s_col = 5 @@ -462,41 +449,33 @@ def test_disjoint_sets(): # time variable msg = r"At least one variable/column is set as outcome variable \(``y_col``\) and time variable \(``t_col``\)." with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], t_col="yy") + _ = DoubleMLDIDData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], t_col="yy") msg = r"At least one variable/column is set as treatment variable \(``d_cols``\) and time variable \(``t_col``\)." with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], t_col="dd1") + _ = DoubleMLDIDData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], t_col="dd1") msg = r"At least one variable/column is set as covariate \(``x_cols``\) and time variable \(``t_col``\)." with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], t_col="xx2") + _ = DoubleMLDIDData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], t_col="xx2") msg = r"At least one variable/column is set as instrumental variable \(``z_cols``\) and time variable \(``t_col``\)." with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], z_cols="zz", t_col="zz") + _ = DoubleMLDIDData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], z_cols="zz", t_col="zz") # score or selection variable - msg = ( - r"At least one variable/column is set as outcome variable \(``y_col``\) and score or selection variable \(``s_col``\)." - ) + msg = r"At least one variable/column is set as outcome variable \(``y_col``\) and selection variable \(``s_col``\)." 
with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], s_col="yy") - msg = ( - r"At least one variable/column is set as treatment variable \(``d_cols``\) " - r"and score or selection variable \(``s_col``\)." - ) + _ = DoubleMLSSMData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], s_col="yy") + msg = r"At least one variable/column is set as treatment variable \(``d_cols``\) " r"and selection variable \(``s_col``\)." with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], s_col="dd1") - msg = r"At least one variable/column is set as covariate \(``x_cols``\) and score or selection variable \(``s_col``\)." + _ = DoubleMLSSMData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], s_col="dd1") + msg = r"At least one variable/column is set as covariate \(``x_cols``\) and selection variable \(``s_col``\)." with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], s_col="xx2") + _ = DoubleMLSSMData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], s_col="xx2") msg = ( r"At least one variable/column is set as instrumental variable \(``z_cols``\) " - r"and score or selection variable \(``s_col``\)." + r"and selection variable \(``s_col``\)." ) with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], z_cols="zz", s_col="zz") - msg = r"At least one variable/column is set as time variable \(``t_col``\) and score or selection variable \(``s_col``\)." 
- with pytest.raises(ValueError, match=msg): - _ = DoubleMLData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], t_col="tt", s_col="tt") + _ = DoubleMLSSMData(df, y_col="yy", d_cols=["dd1"], x_cols=["xx1", "xx2"], z_cols="zz", s_col="zz") @pytest.mark.ci diff --git a/doubleml/data/tests/test_panel_data.py b/doubleml/data/tests/test_panel_data.py index e1a7c925..d8506b0d 100644 --- a/doubleml/data/tests/test_panel_data.py +++ b/doubleml/data/tests/test_panel_data.py @@ -33,7 +33,7 @@ def test_t_col_setter(): with pytest.raises(ValueError, match=msg): dml_data.t_col = "a13" - msg = r"The time variable t_col must be of str type \(or None\). " "5 of type was passed." + msg = r"The time variable t_col must be of str type. " "5 of type was passed." with pytest.raises(TypeError, match=msg): dml_data.t_col = 5 diff --git a/doubleml/datasets.py b/doubleml/datasets.py deleted file mode 100644 index 0dcd33c7..00000000 --- a/doubleml/datasets.py +++ /dev/null @@ -1,1620 +0,0 @@ -import warnings - -import numpy as np -import pandas as pd -from scipy.linalg import toeplitz -from scipy.optimize import minimize_scalar -from sklearn.datasets import make_spd_matrix -from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures - -from doubleml.data import DoubleMLClusterData, DoubleMLData -from doubleml.utils._aliases import _get_array_alias, _get_data_frame_alias, _get_dml_cluster_data_alias, _get_dml_data_alias - -_array_alias = _get_array_alias() -_data_frame_alias = _get_data_frame_alias() -_dml_data_alias = _get_dml_data_alias() -_dml_cluster_data_alias = _get_dml_cluster_data_alias() - - -def fetch_401K(return_type="DoubleMLData", polynomial_features=False): - """ - Data set on financial wealth and 401(k) plan participation. - - Parameters - ---------- - return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. 
- polynomial_features : - If ``True`` polynomial features are added (see replication files of Chernozhukov et al. (2018)). - - References - ---------- - Abadie, A. (2003), Semiparametric instrumental variable estimation of treatment response models. Journal of - Econometrics, 113(2): 231-263. - - Chernozhukov, V., Chetverikov, D., Demirer, M., Duflo, E., Hansen, C., Newey, W. and Robins, J. (2018), - Double/debiased machine learning for treatment and structural parameters. The Econometrics Journal, 21: C1-C68. - doi:`10.1111/ectj.12097 `_. - """ - url = "https://github.com/VC2015/DMLonGitHub/raw/master/sipp1991.dta" - raw_data = pd.read_stata(url) - - y_col = "net_tfa" - d_cols = ["e401"] - x_cols = ["age", "inc", "educ", "fsize", "marr", "twoearn", "db", "pira", "hown"] - - data = raw_data.copy() - - if polynomial_features: - raise NotImplementedError("polynomial_features os not implemented yet for fetch_401K.") - - if return_type in _data_frame_alias + _dml_data_alias: - if return_type in _data_frame_alias: - return data - else: - return DoubleMLData(data, y_col, d_cols, x_cols) - else: - raise ValueError("Invalid return_type.") - - -def fetch_bonus(return_type="DoubleMLData", polynomial_features=False): - """ - Data set on the Pennsylvania Reemployment Bonus experiment. - - Parameters - ---------- - return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. - polynomial_features : - If ``True`` polynomial features are added (see replication files of Chernozhukov et al. (2018)). - - References - ---------- - Bilias Y. (2000), Sequential Testing of Duration Data: The Case of Pennsylvania 'Reemployment Bonus' Experiment. - Journal of Applied Econometrics, 15(6): 575-594. - - Chernozhukov, V., Chetverikov, D., Demirer, M., Duflo, E., Hansen, C., Newey, W. and Robins, J. 
(2018), - Double/debiased machine learning for treatment and structural parameters. The Econometrics Journal, 21: C1-C68. - doi:`10.1111/ectj.12097 `_. - """ - url = "https://raw.githubusercontent.com/VC2015/DMLonGitHub/master/penn_jae.dat" - raw_data = pd.read_csv(url, sep=r"\s+") - - ind = (raw_data["tg"] == 0) | (raw_data["tg"] == 4) - data = raw_data.copy()[ind] - data.reset_index(inplace=True) - data["tg"] = data["tg"].replace(4, 1) - data["inuidur1"] = np.log(data["inuidur1"]) - - # variable dep as factor (dummy encoding) - dummy_enc = OneHotEncoder(drop="first", categories="auto").fit(data.loc[:, ["dep"]]) - xx = dummy_enc.transform(data.loc[:, ["dep"]]).toarray() - data["dep1"] = xx[:, 0] - data["dep2"] = xx[:, 1] - - y_col = "inuidur1" - d_cols = ["tg"] - x_cols = [ - "female", - "black", - "othrace", - "dep1", - "dep2", - "q2", - "q3", - "q4", - "q5", - "q6", - "agelt35", - "agegt54", - "durable", - "lusd", - "husd", - ] - - if polynomial_features: - poly = PolynomialFeatures(2, include_bias=False) - data_transf = poly.fit_transform(data[x_cols]) - x_cols = list(poly.get_feature_names_out(x_cols)) - - data_transf = pd.DataFrame(data_transf, columns=x_cols) - data = pd.concat((data[[y_col] + d_cols], data_transf), axis=1, sort=False) - - if return_type in _data_frame_alias + _dml_data_alias: - if return_type in _data_frame_alias: - return data - else: - return DoubleMLData(data, y_col, d_cols, x_cols) - else: - raise ValueError("Invalid return_type.") - - -def _g(x): - return np.power(np.sin(x), 2) - - -def _m(x, nu=0.0, gamma=1.0): - return 0.5 / np.pi * (np.sinh(gamma)) / (np.cosh(gamma) - np.cos(x - nu)) - - -def make_plr_CCDDHNR2018(n_obs=500, dim_x=20, alpha=0.5, return_type="DoubleMLData", **kwargs): - """ - Generates data from a partially linear regression model used in Chernozhukov et al. (2018) for Figure 1. - The data generating process is defined as - - .. 
math:: - - d_i &= m_0(x_i) + s_1 v_i, & &v_i \\sim \\mathcal{N}(0,1), - - y_i &= \\alpha d_i + g_0(x_i) + s_2 \\zeta_i, & &\\zeta_i \\sim \\mathcal{N}(0,1), - - - with covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` is a matrix with entries - :math:`\\Sigma_{kj} = 0.7^{|j-k|}`. - The nuisance functions are given by - - .. math:: - - m_0(x_i) &= a_0 x_{i,1} + a_1 \\frac{\\exp(x_{i,3})}{1+\\exp(x_{i,3})}, - - g_0(x_i) &= b_0 \\frac{\\exp(x_{i,1})}{1+\\exp(x_{i,1})} + b_1 x_{i,3}. - - Parameters - ---------- - n_obs : - The number of observations to simulate. - dim_x : - The number of covariates. - alpha : - The value of the causal parameter. - return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. - - If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d)``. - **kwargs - Additional keyword arguments to set non-default values for the parameters - :math:`a_0=1`, :math:`a_1=0.25`, :math:`s_1=1`, :math:`b_0=1`, :math:`b_1=0.25` or :math:`s_2=1`. - - References - ---------- - Chernozhukov, V., Chetverikov, D., Demirer, M., Duflo, E., Hansen, C., Newey, W. and Robins, J. (2018), - Double/debiased machine learning for treatment and structural parameters. The Econometrics Journal, 21: C1-C68. - doi:`10.1111/ectj.12097 `_. 
- """ - a_0 = kwargs.get("a_0", 1.0) - a_1 = kwargs.get("a_1", 0.25) - s_1 = kwargs.get("s_1", 1.0) - - b_0 = kwargs.get("b_0", 1.0) - b_1 = kwargs.get("b_1", 0.25) - s_2 = kwargs.get("s_2", 1.0) - - cov_mat = toeplitz([np.power(0.7, k) for k in range(dim_x)]) - x = np.random.multivariate_normal( - np.zeros(dim_x), - cov_mat, - size=[ - n_obs, - ], - ) - - d = ( - a_0 * x[:, 0] - + a_1 * np.divide(np.exp(x[:, 2]), 1 + np.exp(x[:, 2])) - + s_1 - * np.random.standard_normal( - size=[ - n_obs, - ] - ) - ) - y = ( - alpha * d - + b_0 * np.divide(np.exp(x[:, 0]), 1 + np.exp(x[:, 0])) - + b_1 * x[:, 2] - + s_2 - * np.random.standard_normal( - size=[ - n_obs, - ] - ) - ) - - if return_type in _array_alias: - return x, y, d - elif return_type in _data_frame_alias + _dml_data_alias: - x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] - data = pd.DataFrame(np.column_stack((x, y, d)), columns=x_cols + ["y", "d"]) - if return_type in _data_frame_alias: - return data - else: - return DoubleMLData(data, "y", "d", x_cols) - else: - raise ValueError("Invalid return_type.") - - -def make_plr_turrell2018(n_obs=100, dim_x=20, theta=0.5, return_type="DoubleMLData", **kwargs): - """ - Generates data from a partially linear regression model used in a blog article by Turrell (2018). - The data generating process is defined as - - .. math:: - - d_i &= m_0(x_i' b) + v_i, & &v_i \\sim \\mathcal{N}(0,1), - - y_i &= \\theta d_i + g_0(x_i' b) + u_i, & &u_i \\sim \\mathcal{N}(0,1), - - - with covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` is a random symmetric, - positive-definite matrix generated with :py:meth:`sklearn.datasets.make_spd_matrix`. - :math:`b` is a vector with entries :math:`b_j=\\frac{1}{j}` and the nuisance functions are given by - - .. math:: - - m_0(x_i) &= \\frac{1}{2 \\pi} \\frac{\\sinh(\\gamma)}{\\cosh(\\gamma) - \\cos(x_i-\\nu)}, - - g_0(x_i) &= \\sin(x_i)^2. - - Parameters - ---------- - n_obs : - The number of observations to simulate. 
- dim_x : - The number of covariates. - theta : - The value of the causal parameter. - return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. - - If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d)``. - **kwargs - Additional keyword arguments to set non-default values for the parameters - :math:`\\nu=0`, or :math:`\\gamma=1`. - - References - ---------- - Turrell, A. (2018), Econometrics in Python part I - Double machine learning, Markov Wanderer: A blog on economics, - science, coding and data. `https://aeturrell.com/blog/posts/econometrics-in-python-parti-ml/ - `_. - """ - nu = kwargs.get("nu", 0.0) - gamma = kwargs.get("gamma", 1.0) - - b = [1 / k for k in range(1, dim_x + 1)] - sigma = make_spd_matrix(dim_x) - - x = np.random.multivariate_normal( - np.zeros(dim_x), - sigma, - size=[ - n_obs, - ], - ) - G = _g(np.dot(x, b)) - M = _m(np.dot(x, b), nu=nu, gamma=gamma) - d = M + np.random.standard_normal( - size=[ - n_obs, - ] - ) - y = ( - np.dot(theta, d) - + G - + np.random.standard_normal( - size=[ - n_obs, - ] - ) - ) - - if return_type in _array_alias: - return x, y, d - elif return_type in _data_frame_alias + _dml_data_alias: - x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] - data = pd.DataFrame(np.column_stack((x, y, d)), columns=x_cols + ["y", "d"]) - if return_type in _data_frame_alias: - return data - else: - return DoubleMLData(data, "y", "d", x_cols) - else: - raise ValueError("Invalid return_type.") - - -def make_irm_data(n_obs=500, dim_x=20, theta=0, R2_d=0.5, R2_y=0.5, return_type="DoubleMLData"): - """ - Generates data from a interactive regression (IRM) model. - The data generating process is defined as - - .. 
math:: - - d_i &= 1\\left\\lbrace \\frac{\\exp(c_d x_i' \\beta)}{1+\\exp(c_d x_i' \\beta)} > v_i \\right\\rbrace, & &v_i - \\sim \\mathcal{U}(0,1), - - y_i &= \\theta d_i + c_y x_i' \\beta d_i + \\zeta_i, & &\\zeta_i \\sim \\mathcal{N}(0,1), - - with covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` is a matrix with entries - :math:`\\Sigma_{kj} = 0.5^{|j-k|}`. - :math:`\\beta` is a `dim_x`-vector with entries :math:`\\beta_j=\\frac{1}{j^2}` and the constants :math:`c_y` and - :math:`c_d` are given by - - .. math:: - - c_y = \\sqrt{\\frac{R_y^2}{(1-R_y^2) \\beta' \\Sigma \\beta}}, \\qquad c_d = - \\sqrt{\\frac{(\\pi^2 /3) R_d^2}{(1-R_d^2) \\beta' \\Sigma \\beta}}. - - The data generating process is inspired by a process used in the simulation experiment (see Appendix P) of Belloni - et al. (2017). - - Parameters - ---------- - n_obs : - The number of observations to simulate. - dim_x : - The number of covariates. - theta : - The value of the causal parameter. - R2_d : - The value of the parameter :math:`R_d^2`. - R2_y : - The value of the parameter :math:`R_y^2`. - return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. - - If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d)``. - - References - ---------- - Belloni, A., Chernozhukov, V., Fernández‐Val, I. and Hansen, C. (2017). Program Evaluation and Causal Inference With - High‐Dimensional Data. Econometrica, 85: 233-298. 
- """ - # inspired by https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA12723, see suplement - v = np.random.uniform( - size=[ - n_obs, - ] - ) - zeta = np.random.standard_normal( - size=[ - n_obs, - ] - ) - - cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)]) - x = np.random.multivariate_normal( - np.zeros(dim_x), - cov_mat, - size=[ - n_obs, - ], - ) - - beta = [1 / (k**2) for k in range(1, dim_x + 1)] - b_sigma_b = np.dot(np.dot(cov_mat, beta), beta) - c_y = np.sqrt(R2_y / ((1 - R2_y) * b_sigma_b)) - c_d = np.sqrt(np.pi**2 / 3.0 * R2_d / ((1 - R2_d) * b_sigma_b)) - - xx = np.exp(np.dot(x, np.multiply(beta, c_d))) - d = 1.0 * ((xx / (1 + xx)) > v) - - y = d * theta + d * np.dot(x, np.multiply(beta, c_y)) + zeta - - if return_type in _array_alias: - return x, y, d - elif return_type in _data_frame_alias + _dml_data_alias: - x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] - data = pd.DataFrame(np.column_stack((x, y, d)), columns=x_cols + ["y", "d"]) - if return_type in _data_frame_alias: - return data - else: - return DoubleMLData(data, "y", "d", x_cols) - else: - raise ValueError("Invalid return_type.") - - -def make_iivm_data(n_obs=500, dim_x=20, theta=1.0, alpha_x=0.2, return_type="DoubleMLData"): - """ - Generates data from a interactive IV regression (IIVM) model. - The data generating process is defined as - - .. math:: - - d_i &= 1\\left\\lbrace \\alpha_x Z + v_i > 0 \\right\\rbrace, - - y_i &= \\theta d_i + x_i' \\beta + u_i, - - with :math:`Z \\sim \\text{Bernoulli}(0.5)` and - - .. math:: - - \\left(\\begin{matrix} u_i \\\\ v_i \\end{matrix} \\right) \\sim - \\mathcal{N}\\left(0, \\left(\\begin{matrix} 1 & 0.3 \\\\ 0.3 & 1 \\end{matrix} \\right) \\right). - - The covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` is a matrix with entries - :math:`\\Sigma_{kj} = 0.5^{|j-k|}` and :math:`\\beta` is a `dim_x`-vector with entries - :math:`\\beta_j=\\frac{1}{j^2}`. 
- - The data generating process is inspired by a process used in the simulation experiment of Farbmacher, Gruber and - Klaassen (2020). - - Parameters - ---------- - n_obs : - The number of observations to simulate. - dim_x : - The number of covariates. - theta : - The value of the causal parameter. - alpha_x : - The value of the parameter :math:`\\alpha_x`. - return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. - - If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d, z)``. - - References - ---------- - Farbmacher, H., Guber, R. and Klaaßen, S. (2020). Instrument Validity Tests with Causal Forests. MEA Discussion - Paper No. 13-2020. Available at SSRN: http://dx.doi.org/10.2139/ssrn.3619201. - """ - # inspired by https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3619201 - xx = np.random.multivariate_normal( - np.zeros(2), - np.array([[1.0, 0.3], [0.3, 1.0]]), - size=[ - n_obs, - ], - ) - u = xx[:, 0] - v = xx[:, 1] - - cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)]) - x = np.random.multivariate_normal( - np.zeros(dim_x), - cov_mat, - size=[ - n_obs, - ], - ) - - beta = [1 / (k**2) for k in range(1, dim_x + 1)] - - z = np.random.binomial( - p=0.5, - n=1, - size=[ - n_obs, - ], - ) - d = 1.0 * (alpha_x * z + v > 0) - - y = d * theta + np.dot(x, beta) + u - - if return_type in _array_alias: - return x, y, d, z - elif return_type in _data_frame_alias + _dml_data_alias: - x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] - data = pd.DataFrame(np.column_stack((x, y, d, z)), columns=x_cols + ["y", "d", "z"]) - if return_type in _data_frame_alias: - return data - else: - return DoubleMLData(data, "y", "d", x_cols, "z") - else: - raise ValueError("Invalid return_type.") - - -def _make_pliv_data(n_obs=100, dim_x=20, theta=0.5, gamma_z=0.4, return_type="DoubleMLData"): - b = [1 / 
k for k in range(1, dim_x + 1)] - sigma = make_spd_matrix(dim_x) - - x = np.random.multivariate_normal( - np.zeros(dim_x), - sigma, - size=[ - n_obs, - ], - ) - G = _g(np.dot(x, b)) - # instrument - z = _m(np.dot(x, b)) + np.random.standard_normal( - size=[ - n_obs, - ] - ) - # treatment - M = _m(gamma_z * z + np.dot(x, b)) - d = M + np.random.standard_normal( - size=[ - n_obs, - ] - ) - y = ( - np.dot(theta, d) - + G - + np.random.standard_normal( - size=[ - n_obs, - ] - ) - ) - - if return_type in _array_alias: - return x, y, d, z - elif return_type in _data_frame_alias + _dml_data_alias: - x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] - data = pd.DataFrame(np.column_stack((x, y, d, z)), columns=x_cols + ["y", "d", "z"]) - if return_type in _data_frame_alias: - return data - else: - return DoubleMLData(data, "y", "d", x_cols, "z") - else: - raise ValueError("Invalid return_type.") - - -def make_pliv_CHS2015(n_obs, alpha=1.0, dim_x=200, dim_z=150, return_type="DoubleMLData"): - """ - Generates data from a partially linear IV regression model used in Chernozhukov, Hansen and Spindler (2015). - The data generating process is defined as - - .. math:: - - z_i &= \\Pi x_i + \\zeta_i, - - d_i &= x_i' \\gamma + z_i' \\delta + u_i, - - y_i &= \\alpha d_i + x_i' \\beta + \\varepsilon_i, - - with - - .. math:: - - \\left(\\begin{matrix} \\varepsilon_i \\\\ u_i \\\\ \\zeta_i \\\\ x_i \\end{matrix} \\right) \\sim - \\mathcal{N}\\left(0, \\left(\\begin{matrix} 1 & 0.6 & 0 & 0 \\\\ 0.6 & 1 & 0 & 0 \\\\ - 0 & 0 & 0.25 I_{p_n^z} & 0 \\\\ 0 & 0 & 0 & \\Sigma \\end{matrix} \\right) \\right) - - where :math:`\\Sigma` is a :math:`p_n^x \\times p_n^x` matrix with entries - :math:`\\Sigma_{kj} = 0.5^{|j-k|}` and :math:`I_{p_n^z}` is the :math:`p_n^z \\times p_n^z` identity matrix. 
- :math:`\\beta = \\gamma` is a :math:`p_n^x`-vector with entries :math:`\\beta_j=\\frac{1}{j^2}`, - :math:`\\delta` is a :math:`p_n^z`-vector with entries :math:`\\delta_j=\\frac{1}{j^2}` - and :math:`\\Pi = (I_{p_n^z}, 0_{p_n^z \\times (p_n^x - p_n^z)})`. - - Parameters - ---------- - n_obs : - The number of observations to simulate. - alpha : - The value of the causal parameter. - dim_x : - The number of covariates. - dim_z : - The number of instruments. - return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. - - If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d, z)``. - - References - ---------- - Chernozhukov, V., Hansen, C. and Spindler, M. (2015), Post-Selection and Post-Regularization Inference in Linear - Models with Many Controls and Instruments. American Economic Review: Papers and Proceedings, 105 (5): 486-90. 
- """ - assert dim_x >= dim_z - # see https://assets.aeaweb.org/asset-server/articles-attachments/aer/app/10505/P2015_1022_app.pdf - xx = np.random.multivariate_normal( - np.zeros(2), - np.array([[1.0, 0.6], [0.6, 1.0]]), - size=[ - n_obs, - ], - ) - epsilon = xx[:, 0] - u = xx[:, 1] - - sigma = toeplitz([np.power(0.5, k) for k in range(0, dim_x)]) - x = np.random.multivariate_normal( - np.zeros(dim_x), - sigma, - size=[ - n_obs, - ], - ) - - I_z = np.eye(dim_z) - xi = np.random.multivariate_normal( - np.zeros(dim_z), - 0.25 * I_z, - size=[ - n_obs, - ], - ) - - beta = [1 / (k**2) for k in range(1, dim_x + 1)] - gamma = beta - delta = [1 / (k**2) for k in range(1, dim_z + 1)] - Pi = np.hstack((I_z, np.zeros((dim_z, dim_x - dim_z)))) - - z = np.dot(x, np.transpose(Pi)) + xi - d = np.dot(x, gamma) + np.dot(z, delta) + u - y = alpha * d + np.dot(x, beta) + epsilon - - if return_type in _array_alias: - return x, y, d, z - elif return_type in _data_frame_alias + _dml_data_alias: - x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] - z_cols = [f"Z{i + 1}" for i in np.arange(dim_z)] - data = pd.DataFrame(np.column_stack((x, y, d, z)), columns=x_cols + ["y", "d"] + z_cols) - if return_type in _data_frame_alias: - return data - else: - return DoubleMLData(data, "y", "d", x_cols, z_cols) - else: - raise ValueError("Invalid return_type.") - - -def make_pliv_multiway_cluster_CKMS2021(N=25, M=25, dim_X=100, theta=1.0, return_type="DoubleMLClusterData", **kwargs): - """ - Generates data from a partially linear IV regression model with multiway cluster sample used in Chiang et al. - (2021). The data generating process is defined as - - .. math:: - - Z_{ij} &= X_{ij}' \\xi_0 + V_{ij}, - - D_{ij} &= Z_{ij}' \\pi_{10} + X_{ij}' \\pi_{20} + v_{ij}, - - Y_{ij} &= D_{ij} \\theta + X_{ij}' \\zeta_0 + \\varepsilon_{ij}, - - with - - .. 
math:: - - X_{ij} &= (1 - \\omega_1^X - \\omega_2^X) \\alpha_{ij}^X - + \\omega_1^X \\alpha_{i}^X + \\omega_2^X \\alpha_{j}^X, - - \\varepsilon_{ij} &= (1 - \\omega_1^\\varepsilon - \\omega_2^\\varepsilon) \\alpha_{ij}^\\varepsilon - + \\omega_1^\\varepsilon \\alpha_{i}^\\varepsilon + \\omega_2^\\varepsilon \\alpha_{j}^\\varepsilon, - - v_{ij} &= (1 - \\omega_1^v - \\omega_2^v) \\alpha_{ij}^v - + \\omega_1^v \\alpha_{i}^v + \\omega_2^v \\alpha_{j}^v, - - V_{ij} &= (1 - \\omega_1^V - \\omega_2^V) \\alpha_{ij}^V - + \\omega_1^V \\alpha_{i}^V + \\omega_2^V \\alpha_{j}^V, - - and :math:`\\alpha_{ij}^X, \\alpha_{i}^X, \\alpha_{j}^X \\sim \\mathcal{N}(0, \\Sigma)` - where :math:`\\Sigma` is a :math:`p_x \\times p_x` matrix with entries - :math:`\\Sigma_{kj} = s_X^{|j-k|}`. - Further - - .. math:: - - \\left(\\begin{matrix} \\alpha_{ij}^\\varepsilon \\\\ \\alpha_{ij}^v \\end{matrix}\\right), - \\left(\\begin{matrix} \\alpha_{i}^\\varepsilon \\\\ \\alpha_{i}^v \\end{matrix}\\right), - \\left(\\begin{matrix} \\alpha_{j}^\\varepsilon \\\\ \\alpha_{j}^v \\end{matrix}\\right) - \\sim \\mathcal{N}\\left(0, \\left(\\begin{matrix} 1 & s_{\\varepsilon v} \\\\ - s_{\\varepsilon v} & 1 \\end{matrix} \\right) \\right) - - - and :math:`\\alpha_{ij}^V, \\alpha_{i}^V, \\alpha_{j}^V \\sim \\mathcal{N}(0, 1)`. - - Parameters - ---------- - N : - The number of observations (first dimension). - M : - The number of observations (second dimension). - dim_X : - The number of covariates. - theta : - The value of the causal parameter. - return_type : - If ``'DoubleMLClusterData'`` or ``DoubleMLClusterData``, returns a ``DoubleMLClusterData`` object where - ``DoubleMLClusterData.data`` is a ``pd.DataFrame``. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. - - If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s - ``(x, y, d, cluster_vars, z)``. 
- **kwargs - Additional keyword arguments to set non-default values for the parameters - :math:`\\pi_{10}=1.0`, :math:`\\omega_X = \\omega_{\\varepsilon} = \\omega_V = \\omega_v = (0.25, 0.25)`, - :math:`s_X = s_{\\varepsilon v} = 0.25`, - or the :math:`p_x`-vectors :math:`\\zeta_0 = \\pi_{20} = \\xi_0` with default entries - :math:`(\\zeta_{0})_j = 0.5^j`. - - References - ---------- - Chiang, H. D., Kato K., Ma, Y. and Sasaki, Y. (2021), Multiway Cluster Robust Double/Debiased Machine Learning, - Journal of Business & Economic Statistics, - doi: `10.1080/07350015.2021.1895815 `_, - arXiv:`1909.03489 `_. - """ - # additional parameters specifiable via kwargs - pi_10 = kwargs.get("pi_10", 1.0) - - xx = np.arange(1, dim_X + 1) - zeta_0 = kwargs.get("zeta_0", np.power(0.5, xx)) - pi_20 = kwargs.get("pi_20", np.power(0.5, xx)) - xi_0 = kwargs.get("xi_0", np.power(0.5, xx)) - - omega_X = kwargs.get("omega_X", np.array([0.25, 0.25])) - omega_epsilon = kwargs.get("omega_epsilon", np.array([0.25, 0.25])) - omega_v = kwargs.get("omega_v", np.array([0.25, 0.25])) - omega_V = kwargs.get("omega_V", np.array([0.25, 0.25])) - - s_X = kwargs.get("s_X", 0.25) - s_epsilon_v = kwargs.get("s_epsilon_v", 0.25) - - # use np.tile() and np.repeat() for repeating vectors in different styles, i.e., - # np.tile([v1, v2, v3], 2) [v1, v2, v3, v1, v2, v3] - # np.repeat([v1, v2, v3], 2) [v1, v1, v2, v2, v3, v3] - - alpha_V = np.random.normal(size=(N * M)) - alpha_V_i = np.repeat(np.random.normal(size=N), M) - alpha_V_j = np.tile(np.random.normal(size=M), N) - - cov_mat = np.array([[1, s_epsilon_v], [s_epsilon_v, 1]]) - alpha_eps_v = np.random.multivariate_normal( - np.zeros(2), - cov_mat, - size=[ - N * M, - ], - ) - alpha_eps = alpha_eps_v[:, 0] - alpha_v = alpha_eps_v[:, 1] - - alpha_eps_v_i = np.random.multivariate_normal( - np.zeros(2), - cov_mat, - size=[ - N, - ], - ) - alpha_eps_i = np.repeat(alpha_eps_v_i[:, 0], M) - alpha_v_i = np.repeat(alpha_eps_v_i[:, 1], M) - - alpha_eps_v_j = 
np.random.multivariate_normal( - np.zeros(2), - cov_mat, - size=[ - M, - ], - ) - alpha_eps_j = np.tile(alpha_eps_v_j[:, 0], N) - alpha_v_j = np.tile(alpha_eps_v_j[:, 1], N) - - cov_mat = toeplitz([np.power(s_X, k) for k in range(dim_X)]) - alpha_X = np.random.multivariate_normal( - np.zeros(dim_X), - cov_mat, - size=[ - N * M, - ], - ) - alpha_X_i = np.repeat( - np.random.multivariate_normal( - np.zeros(dim_X), - cov_mat, - size=[ - N, - ], - ), - M, - axis=0, - ) - alpha_X_j = np.tile( - np.random.multivariate_normal( - np.zeros(dim_X), - cov_mat, - size=[ - M, - ], - ), - (N, 1), - ) - - # generate variables - x = (1 - omega_X[0] - omega_X[1]) * alpha_X + omega_X[0] * alpha_X_i + omega_X[1] * alpha_X_j - - eps = ( - (1 - omega_epsilon[0] - omega_epsilon[1]) * alpha_eps + omega_epsilon[0] * alpha_eps_i + omega_epsilon[1] * alpha_eps_j - ) - - v = (1 - omega_v[0] - omega_v[1]) * alpha_v + omega_v[0] * alpha_v_i + omega_v[1] * alpha_v_j - - V = (1 - omega_V[0] - omega_V[1]) * alpha_V + omega_V[0] * alpha_V_i + omega_V[1] * alpha_V_j - - z = np.matmul(x, xi_0) + V - d = z * pi_10 + np.matmul(x, pi_20) + v - y = d * theta + np.matmul(x, zeta_0) + eps - - cluster_cols = ["cluster_var_i", "cluster_var_j"] - cluster_vars = pd.MultiIndex.from_product([range(N), range(M)]).to_frame(name=cluster_cols).reset_index(drop=True) - - if return_type in _array_alias: - return x, y, d, cluster_vars.values, z - elif return_type in _data_frame_alias + _dml_cluster_data_alias: - x_cols = [f"X{i + 1}" for i in np.arange(dim_X)] - data = pd.concat((cluster_vars, pd.DataFrame(np.column_stack((x, y, d, z)), columns=x_cols + ["Y", "D", "Z"])), axis=1) - if return_type in _data_frame_alias: - return data - else: - return DoubleMLClusterData(data, "Y", "D", cluster_cols, x_cols, "Z") - else: - raise ValueError("Invalid return_type.") - - -def make_confounded_irm_data(n_obs=500, theta=0.0, gamma_a=0.127, beta_a=0.58, linear=False, **kwargs): - """ - Generates counfounded data from an 
interactive regression model. - - The data generating process is defined as follows (inspired by the Monte Carlo simulation used - in Sant'Anna and Zhao (2020)). - - Let :math:`X= (X_1, X_2, X_3, X_4, X_5)^T \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` corresponds - to the identity matrix. - Further, define :math:`Z_j = (\\tilde{Z_j} - \\mathbb{E}[\\tilde{Z}_j]) / \\sqrt{\\text{Var}(\\tilde{Z}_j)}`, - where - - .. math:: - - \\tilde{Z}_1 &= \\exp(0.5 \\cdot X_1) - - \\tilde{Z}_2 &= 10 + X_2/(1 + \\exp(X_1)) - - \\tilde{Z}_3 &= (0.6 + X_1 \\cdot X_3 / 25)^3 - - \\tilde{Z}_4 &= (20 + X_2 + X_4)^2 - - \\tilde{Z}_5 &= X_5. - - Additionally, generate a confounder :math:`A \\sim \\mathcal{U}[-1, 1]`. - At first, define the propensity score as - - .. math:: - - m(X, A) = P(D=1|X,A) = p(Z) + \\gamma_A \\cdot A - - where - - .. math:: - - p(Z) &= \\frac{\\exp(f_{ps}(Z))}{1 + \\exp(f_{ps}(Z))}, - - f_{ps}(Z) &= 0.75 \\cdot (-Z_1 + 0.1 \\cdot Z_2 -0.25 \\cdot Z_3 - 0.1 \\cdot Z_4). - - and generate the treatment :math:`D = 1\\{m(X, A) \\ge U\\}` with :math:`U \\sim \\mathcal{U}[0, 1]`. - Since :math:`A` is independent of :math:`X`, the short form of the propensity score is given as - - .. math:: - - P(D=1|X) = p(Z). - - Further, generate the outcome of interest :math:`Y` as - - .. math:: - - Y &= \\theta \\cdot D (Z_5 + 1) + g(Z) + \\beta_A \\cdot A + \\varepsilon - - g(Z) &= 2.5 + 0.74 \\cdot Z_1 + 0.25 \\cdot Z_2 + 0.137 \\cdot (Z_3 + Z_4) - - where :math:`\\varepsilon \\sim \\mathcal{N}(0,5)`. - This implies an average treatment effect of :math:`\\theta`. Additionally, the long and short forms of - the conditional expectation take the following forms - - .. math:: - - \\mathbb{E}[Y|D, X, A] &= \\theta \\cdot D (Z_5 + 1) + g(Z) + \\beta_A \\cdot A - - \\mathbb{E}[Y|D, X] &= (\\theta + \\beta_A \\frac{\\mathrm{Cov}(A, D(Z_5 + 1))}{\\mathrm{Var}(D(Z_5 + 1))}) - \\cdot D (Z_5 + 1) + g(Z). 
- - Consequently, the strength of confounding is determined via :math:`\\gamma_A` and :math:`\\beta_A`, which can be - set via the parameters ``gamma_a`` and ``beta_a``. - - The observed data is given as :math:`W = (Y, D, Z)`. - Further, orcale values of the confounder :math:`A`, the transformed covariated :math:`Z`, - the potential outcomes of :math:`Y`, the long and short forms of the main regression and the propensity score and - in sample versions of the confounding parameters :math:`cf_d` and :math:`cf_y` (for ATE and ATTE) - are returned in a dictionary. - - Parameters - ---------- - n_obs : int - The number of observations to simulate. - Default is ``500``. - theta : float or int - Average treatment effect. - Default is ``0.0``. - gamma_a : float - Coefficient of the unobserved confounder in the propensity score. - Default is ``0.127``. - beta_a : float - Coefficient of the unobserved confounder in the outcome regression. - Default is ``0.58``. - linear : bool - If ``True``, the Z will be set to X, such that the underlying (short) models are linear/logistic. - Default is ``False``. - - Returns - ------- - res_dict : dictionary - Dictionary with entries ``x``, ``y``, ``d`` and ``oracle_values``. - - References - ---------- - Sant’Anna, P. H. and Zhao, J. (2020), - Doubly robust difference-in-differences estimators. Journal of Econometrics, 219(1), 101-122. - doi:`10.1016/j.jeconom.2020.06.003 `_. 
- """ - c = 0.0 # the confounding strength is only valid for c=0 - xi = 0.75 - dim_x = kwargs.get("dim_x", 5) - trimming_threshold = kwargs.get("trimming_threshold", 0.01) - var_eps_y = kwargs.get("var_eps_y", 1.0) - - # Specification of main regression function - def f_reg(w): - res = 2.5 + 0.74 * w[:, 0] + 0.25 * w[:, 1] + 0.137 * (w[:, 2] + w[:, 3]) - return res - - # Specification of prop score function - def f_ps(w, xi): - res = xi * (-w[:, 0] + 0.1 * w[:, 1] - 0.25 * w[:, 2] - 0.1 * w[:, 3]) - return res - - # observed covariates - cov_mat = toeplitz([np.power(c, k) for k in range(dim_x)]) - x = np.random.multivariate_normal( - np.zeros(dim_x), - cov_mat, - size=[ - n_obs, - ], - ) - z_tilde_1 = np.exp(0.5 * x[:, 0]) - z_tilde_2 = 10 + x[:, 1] / (1 + np.exp(x[:, 0])) - z_tilde_3 = (0.6 + x[:, 0] * x[:, 2] / 25) ** 3 - z_tilde_4 = (20 + x[:, 1] + x[:, 3]) ** 2 - z_tilde_5 = x[:, 4] - z_tilde = np.column_stack((z_tilde_1, z_tilde_2, z_tilde_3, z_tilde_4, z_tilde_5)) - z = (z_tilde - np.mean(z_tilde, axis=0)) / np.std(z_tilde, axis=0) - # error terms and unobserved confounder - eps_y = np.random.normal(loc=0, scale=np.sqrt(var_eps_y), size=n_obs) - # unobserved confounder - a_bounds = (-1, 1) - a = np.random.uniform(low=a_bounds[0], high=a_bounds[1], size=n_obs) - var_a = np.square(a_bounds[1] - a_bounds[0]) / 12 - - # Choose the features used in the models - if linear: - features_ps = x - features_reg = x - else: - features_ps = z - features_reg = z - - p = np.exp(f_ps(features_ps, xi)) / (1 + np.exp(f_ps(features_ps, xi))) - # compute short and long form of propensity score - m_long = p + gamma_a * a - m_short = p - # check propensity score bounds - if np.any(m_long < trimming_threshold) or np.any(m_long > 1.0 - trimming_threshold): - m_long = np.clip(m_long, trimming_threshold, 1.0 - trimming_threshold) - m_short = np.clip(m_short, trimming_threshold, 1.0 - trimming_threshold) - warnings.warn( - f"Propensity score is close to 0 or 1. 
" - f"Trimming is at {trimming_threshold} and {1.0 - trimming_threshold} is applied" - ) - # generate treatment based on long form - u = np.random.uniform(low=0, high=1, size=n_obs) - d = 1.0 * (m_long >= u) - # add treatment heterogeneity - d1x = z[:, 4] + 1 - var_dx = np.var(d * (d1x)) - cov_adx = gamma_a * var_a - # Outcome regression - g_partial_reg = f_reg(features_reg) - # short model - g_short_d0 = g_partial_reg - g_short_d1 = (theta + beta_a * cov_adx / var_dx) * d1x + g_partial_reg - g_short = d * g_short_d1 + (1.0 - d) * g_short_d0 - # long model - g_long_d0 = g_partial_reg + beta_a * a - g_long_d1 = theta * d1x + g_partial_reg + beta_a * a - g_long = d * g_long_d1 + (1.0 - d) * g_long_d0 - # Potential outcomes - y_0 = g_long_d0 + eps_y - y_1 = g_long_d1 + eps_y - # Realized outcome - y = d * y_1 + (1.0 - d) * y_0 - # In-sample values for confounding strength - explained_residual_variance = np.square(g_long - g_short) - residual_variance = np.square(y - g_short) - cf_y = np.mean(explained_residual_variance) / np.mean(residual_variance) - # compute the Riesz representation - treated_weight = d / np.mean(d) - untreated_weight = (1.0 - d) / np.mean(d) - # Odds ratios - propensity_ratio_long = m_long / (1.0 - m_long) - rr_long_ate = d / m_long - (1.0 - d) / (1.0 - m_long) - rr_long_atte = treated_weight - np.multiply(untreated_weight, propensity_ratio_long) - propensity_ratio_short = m_short / (1.0 - m_short) - rr_short_ate = d / m_short - (1.0 - d) / (1.0 - m_short) - rr_short_atte = treated_weight - np.multiply(untreated_weight, propensity_ratio_short) - cf_d_ate = (np.mean(1 / (m_long * (1 - m_long))) - np.mean(1 / (m_short * (1 - m_short)))) / np.mean( - 1 / (m_long * (1 - m_long)) - ) - cf_d_atte = (np.mean(propensity_ratio_long) - np.mean(propensity_ratio_short)) / np.mean(propensity_ratio_long) - if (beta_a == 0) | (gamma_a == 0): - rho_ate = 0.0 - rho_atte = 0.0 - else: - rho_ate = np.corrcoef((g_long - g_short), (rr_long_ate - rr_short_ate))[0, 1] - 
rho_atte = np.corrcoef((g_long - g_short), (rr_long_atte - rr_short_atte))[0, 1] - oracle_values = { - "g_long": g_long, - "g_short": g_short, - "m_long": m_long, - "m_short": m_short, - "gamma_a": gamma_a, - "beta_a": beta_a, - "a": a, - "y_0": y_0, - "y_1": y_1, - "z": z, - "cf_y": cf_y, - "cf_d_ate": cf_d_ate, - "cf_d_atte": cf_d_atte, - "rho_ate": rho_ate, - "rho_atte": rho_atte, - } - res_dict = {"x": x, "y": y, "d": d, "oracle_values": oracle_values} - return res_dict - - -def make_confounded_plr_data(n_obs=500, theta=5.0, cf_y=0.04, cf_d=0.04, **kwargs): - """ - Generates counfounded data from an partially linear regression model. - - The data generating process is defined as follows (similar to the Monte Carlo simulation used - in Sant'Anna and Zhao (2020)). Let :math:`X= (X_1, X_2, X_3, X_4, X_5)^T \\sim \\mathcal{N}(0, \\Sigma)`, - where :math:`\\Sigma` is a matrix with entries - :math:`\\Sigma_{kj} = c^{|j-k|}`. The default value is :math:`c = 0`, corresponding to the identity matrix. - Further, define :math:`Z_j = (\\tilde{Z_j} - \\mathbb{E}[\\tilde{Z}_j]) / \\sqrt{\\text{Var}(\\tilde{Z}_j)}`, - where - - .. math:: - - \\tilde{Z}_1 &= \\exp(0.5 \\cdot X_1) - - \\tilde{Z}_2 &= 10 + X_2/(1 + \\exp(X_1)) - - \\tilde{Z}_3 &= (0.6 + X_1 \\cdot X_3 / 25)^3 - - \\tilde{Z}_4 &= (20 + X_2 + X_4)^2. - - Additionally, generate a confounder :math:`A \\sim \\mathcal{U}[-1, 1]`. - At first, define the treatment as - - .. math:: - - D = -Z_1 + 0.5 \\cdot Z_2 - 0.25 \\cdot Z_3 - 0.1 \\cdot Z_4 + \\gamma_A \\cdot A + \\varepsilon_D - - and with :math:`\\varepsilon \\sim \\mathcal{N}(0,1)`. - Since :math:`A` is independent of :math:`X`, the long and short form of the treatment regression are given as - - .. math:: - - E[D|X,A] = -Z_1 + 0.5 \\cdot Z_2 - 0.25 \\cdot Z_3 - 0.1 \\cdot Z_4 + \\gamma_A \\cdot A - - E[D|X] = -Z_1 + 0.5 \\cdot Z_2 - 0.25 \\cdot Z_3 - 0.1 \\cdot Z_4. - - Further, generate the outcome of interest :math:`Y` as - - .. 
math:: - - Y &= \\theta \\cdot D + g(Z) + \\beta_A \\cdot A + \\varepsilon - - g(Z) &= 210 + 27.4 \\cdot Z_1 +13.7 \\cdot (Z_2 + Z_3 + Z_4) - - where :math:`\\varepsilon \\sim \\mathcal{N}(0,5)`. - This implies an average treatment effect of :math:`\\theta`. Additionally, the long and short forms of - the conditional expectation take the following forms - - .. math:: - - \\mathbb{E}[Y|D, X, A] &= \\theta \\cdot D + g(Z) + \\beta_A \\cdot A - - \\mathbb{E}[Y|D, X] &= (\\theta + \\gamma_A\\beta_A \\frac{\\mathrm{Var}(A)}{\\mathrm{Var}(D)}) \\cdot D + g(Z). - - Consequently, the strength of confounding is determined via :math:`\\gamma_A` and :math:`\\beta_A`. - Both are chosen to obtain the desired confounding of the outcome and Riesz Representer (in sample). - - The observed data is given as :math:`W = (Y, D, X)`. - Further, orcale values of the confounder :math:`A`, the transformed covariated :math:`Z`, the effect :math:`\\theta`, - the coefficients :math:`\\gamma_a`, :math:`\\beta_a`, the long and short forms of the main regression and - the propensity score are returned in a dictionary. - - Parameters - ---------- - n_obs : int - The number of observations to simulate. - Default is ``500``. - theta : float or int - Average treatment effect. - Default is ``5.0``. - cf_y : float - Percentage of the residual variation of the outcome explained by latent/confounding variable. - Default is ``0.04``. - cf_d : float - Percentage gains in the variation of the Riesz Representer generated by latent/confounding variable. - Default is ``0.04``. - - Returns - ------- - res_dict : dictionary - Dictionary with entries ``x``, ``y``, ``d`` and ``oracle_values``. - - References - ---------- - Sant’Anna, P. H. and Zhao, J. (2020), - Doubly robust difference-in-differences estimators. Journal of Econometrics, 219(1), 101-122. - doi:`10.1016/j.jeconom.2020.06.003 `_. 
- """ - c = kwargs.get("c", 0.0) - dim_x = kwargs.get("dim_x", 4) - - # observed covariates - cov_mat = toeplitz([np.power(c, k) for k in range(dim_x)]) - x = np.random.multivariate_normal( - np.zeros(dim_x), - cov_mat, - size=[ - n_obs, - ], - ) - - z_tilde_1 = np.exp(0.5 * x[:, 0]) - z_tilde_2 = 10 + x[:, 1] / (1 + np.exp(x[:, 0])) - z_tilde_3 = (0.6 + x[:, 0] * x[:, 2] / 25) ** 3 - z_tilde_4 = (20 + x[:, 1] + x[:, 3]) ** 2 - - z_tilde = np.column_stack((z_tilde_1, z_tilde_2, z_tilde_3, z_tilde_4, x[:, 4:])) - z = (z_tilde - np.mean(z_tilde, axis=0)) / np.std(z_tilde, axis=0) - - # error terms - var_eps_y = 5 - eps_y = np.random.normal(loc=0, scale=np.sqrt(var_eps_y), size=n_obs) - var_eps_d = 1 - eps_d = np.random.normal(loc=0, scale=np.sqrt(var_eps_d), size=n_obs) - - # unobserved confounder - a_bounds = (-1, 1) - a = np.random.uniform(low=a_bounds[0], high=a_bounds[1], size=n_obs) - var_a = np.square(a_bounds[1] - a_bounds[0]) / 12 - - # get the required impact of the confounder on the propensity score - m_short = -z[:, 0] + 0.5 * z[:, 1] - 0.25 * z[:, 2] - 0.1 * z[:, 3] - - def f_m(gamma_a): - rr_long = eps_d / var_eps_d - rr_short = (gamma_a * a + eps_d) / (gamma_a**2 * var_a + var_eps_d) - C2_D = (np.mean(np.square(rr_long)) - np.mean(np.square(rr_short))) / np.mean(np.square(rr_short)) - return np.square(C2_D / (1 + C2_D) - cf_d) - - gamma_a = minimize_scalar(f_m).x - m_long = m_short + gamma_a * a - d = m_long + eps_d - - # short and long version of g - g_partial_reg = 210 + 27.4 * z[:, 0] + 13.7 * (z[:, 1] + z[:, 2] + z[:, 3]) - - var_d = np.var(d) - - def f_g(beta_a): - g_diff = beta_a * (a - gamma_a * (var_a / var_d) * d) - y_diff = eps_y + g_diff - return np.square(np.mean(np.square(g_diff)) / np.mean(np.square(y_diff)) - cf_y) - - beta_a = minimize_scalar(f_g).x - - g_long = theta * d + g_partial_reg + beta_a * a - g_short = (theta + gamma_a * beta_a * var_a / var_d) * d + g_partial_reg - - y = g_long + eps_y - - oracle_values = { - "g_long": g_long, 
- "g_short": g_short, - "m_long": m_long, - "m_short": m_short, - "theta": theta, - "gamma_a": gamma_a, - "beta_a": beta_a, - "a": a, - "z": z, - } - - res_dict = {"x": x, "y": y, "d": d, "oracle_values": oracle_values} - - return res_dict - - -def make_heterogeneous_data(n_obs=200, p=30, support_size=5, n_x=1, binary_treatment=False): - """ - Creates a simple synthetic example for heterogeneous treatment effects. - The data generating process is based on the Monte Carlo simulation from Oprescu et al. (2019). - - The data is generated as - - .. math:: - - Y_i & = \\theta_0(X_i)D_i + \\langle X_i,\\gamma_0\\rangle + \\epsilon_i - - D_i & = \\langle X_i,\\beta_0\\rangle + \\eta_i, - - where :math:`X_i\\sim\\mathcal{U}[0,1]^{p}` and :math:`\\epsilon_i,\\eta_i - \\sim\\mathcal{U}[-1,1]`. - If the treatment is set to be binary, the treatment is generated as - - .. math:: - D_i = 1\\{\\langle X_i,\\beta_0\\rangle \\ge \\eta_i\\}. - - The coefficient vectors :math:`\\gamma_0` and :math:`\\beta_0` both have small random (identical) support - which values are drawn independently from :math:`\\mathcal{U}[0,1]` and :math:`\\mathcal{U}[0,0.3]`. - Further, :math:`\\theta_0(x)` defines the conditional treatment effect, which is defined differently depending - on the dimension of :math:`x`. - - If the heterogeneity is univariate the conditional treatment effect takes the following form - - .. math:: - \\theta_0(x) = \\exp(2x_0) + 3\\sin(4x_0), - - whereas for the two-dimensional case the conditional treatment effect is defined as - - .. math:: - \\theta_0(x) = \\exp(2x_0) + 3\\sin(4x_1). - - Parameters - ---------- - n_obs : int - Number of observations to simulate. - Default is ``200``. - - p : int - Dimension of covariates. - Default is ``30``. - - support_size : int - Number of relevant (confounding) covariates. - Default is ``5``. - - n_x : int - Dimension of the heterogeneity. Can be either ``1`` or ``2``. - Default is ``1``. 
- - binary_treatment : bool - Indicates whether the treatment is binary. - Default is ``False``. - - Returns - ------- - res_dict : dictionary - Dictionary with entries ``data``, ``effects``, ``treatment_effect``. - - """ - # simple input checks - assert n_x in [1, 2], "n_x must be either 1 or 2." - assert support_size <= p, "support_size must be smaller than p." - assert isinstance(binary_treatment, bool), "binary_treatment must be a boolean." - - # define treatment effects - if n_x == 1: - - def treatment_effect(x): - return np.exp(2 * x[:, 0]) + 3 * np.sin(4 * x[:, 0]) - - else: - assert n_x == 2 - - # redefine treatment effect - def treatment_effect(x): - return np.exp(2 * x[:, 0]) + 3 * np.sin(4 * x[:, 1]) - - # Outcome support and coefficients - support_y = np.random.choice(np.arange(p), size=support_size, replace=False) - coefs_y = np.random.uniform(0, 1, size=support_size) - # treatment support and coefficients - support_d = support_y - coefs_d = np.random.uniform(0, 0.3, size=support_size) - - # noise - epsilon = np.random.uniform(-1, 1, size=n_obs) - eta = np.random.uniform(-1, 1, size=n_obs) - - # Generate controls, covariates, treatments and outcomes - x = np.random.uniform(0, 1, size=(n_obs, p)) - # Heterogeneous treatment effects - te = treatment_effect(x) - if binary_treatment: - d = 1.0 * (np.dot(x[:, support_d], coefs_d) >= eta) - else: - d = np.dot(x[:, support_d], coefs_d) + eta - y = te * d + np.dot(x[:, support_y], coefs_y) + epsilon - - # Now we build the dataset - y_df = pd.DataFrame({"y": y}) - d_df = pd.DataFrame({"d": d}) - x_df = pd.DataFrame(data=x, index=np.arange(x.shape[0]), columns=[f"X_{i}" for i in range(x.shape[1])]) - - data = pd.concat([y_df, d_df, x_df], axis=1) - res_dict = {"data": data, "effects": te, "treatment_effect": treatment_effect} - return res_dict - - -def make_ssm_data(n_obs=8000, dim_x=100, theta=1, mar=True, return_type="DoubleMLData"): - """ - Generates data from a sample selection model (SSM). 
- The data generating process is defined as - - .. math:: - - y_i &= \\theta d_i + x_i' \\beta d_i + u_i, - - s_i &= 1\\left\\lbrace d_i + \\gamma z_i + x_i' \\beta + v_i > 0 \\right\\rbrace, - - d_i &= 1\\left\\lbrace x_i' \\beta + w_i > 0 \\right\\rbrace, - - with Y being observed if :math:`s_i = 1` and covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma^2_x)`, where - :math:`\\Sigma^2_x` is a matrix with entries - :math:`\\Sigma_{kj} = 0.5^{|j-k|}`. - :math:`\\beta` is a `dim_x`-vector with entries :math:`\\beta_j=\\frac{0.4}{j^2}` - :math:`z_i \\sim \\mathcal{N}(0, 1)`, - :math:`(u_i,v_i) \\sim \\mathcal{N}(0, \\Sigma^2_{u,v})`, - :math:`w_i \\sim \\mathcal{N}(0, 1)`. - - - The data generating process is inspired by a process used in the simulation study (see Appendix E) of Bia, - Huber and Lafférs (2023). - - Parameters - ---------- - n_obs : - The number of observations to simulate. - dim_x : - The number of covariates. - theta : - The value of the causal parameter. - mar: - Boolean. Indicates whether missingness at random holds. - return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. - - If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. - - If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d, z, s)``. 
- - References - ---------- - Michela Bia, Martin Huber & Lukáš Lafférs (2023) Double Machine Learning for Sample Selection Models, - Journal of Business & Economic Statistics, DOI: 10.1080/07350015.2023.2271071 - """ - if mar: - sigma = np.array([[1, 0], [0, 1]]) - gamma = 0 - else: - sigma = np.array([[1, 0.8], [0.8, 1]]) - gamma = 1 - - e = np.random.multivariate_normal(mean=[0, 0], cov=sigma, size=n_obs).T - - cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)]) - x = np.random.multivariate_normal( - np.zeros(dim_x), - cov_mat, - size=[ - n_obs, - ], - ) - - beta = [0.4 / (k**2) for k in range(1, dim_x + 1)] - - d = np.where(np.dot(x, beta) + np.random.randn(n_obs) > 0, 1, 0) - z = np.random.randn(n_obs) - s = np.where(np.dot(x, beta) + d + gamma * z + e[0] > 0, 1, 0) - - y = np.dot(x, beta) + theta * d + e[1] - y[s == 0] = 0 - - if return_type in _array_alias: - return x, y, d, z, s - elif return_type in _data_frame_alias + _dml_data_alias: - x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] - if mar: - data = pd.DataFrame(np.column_stack((x, y, d, s)), columns=x_cols + ["y", "d", "s"]) - else: - data = pd.DataFrame(np.column_stack((x, y, d, z, s)), columns=x_cols + ["y", "d", "z", "s"]) - if return_type in _data_frame_alias: - return data - else: - if mar: - return DoubleMLData(data, "y", "d", x_cols, None, None, "s") - return DoubleMLData(data, "y", "d", x_cols, "z", None, "s") - else: - raise ValueError("Invalid return_type.") - - -def make_irm_data_discrete_treatments(n_obs=200, n_levels=3, linear=False, random_state=None, **kwargs): - """ - Generates data from a interactive regression (IRM) model with multiple treatment levels (based on an - underlying continous treatment). - - The data generating process is defined as follows (similar to the Monte Carlo simulation used - in Sant'Anna and Zhao (2020)). - - Let :math:`X= (X_1, X_2, X_3, X_4, X_5)^T \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` corresponds - to the identity matrix. 
- Further, define :math:`Z_j = (\\tilde{Z_j} - \\mathbb{E}[\\tilde{Z}_j]) / \\sqrt{\\text{Var}(\\tilde{Z}_j)}`, - where - - .. math:: - - \\tilde{Z}_1 &= \\exp(0.5 \\cdot X_1) - - \\tilde{Z}_2 &= 10 + X_2/(1 + \\exp(X_1)) - - \\tilde{Z}_3 &= (0.6 + X_1 \\cdot X_3 / 25)^3 - - \\tilde{Z}_4 &= (20 + X_2 + X_4)^2 - - \\tilde{Z}_5 &= X_5. - - A continuous treatment :math:`D_{\\text{cont}}` is generated as - - .. math:: - - D_{\\text{cont}} = \\xi (-Z_1 + 0.5 Z_2 - 0.25 Z_3 - 0.1 Z_4) + \\varepsilon_D, - - where :math:`\\varepsilon_D \\sim \\mathcal{N}(0,1)` and :math:`\\xi=0.3`. The corresponding treatment - effect is defined as - - .. math:: - - \\theta (d) = 0.1 \\exp(d) + 10 \\sin(0.7 d) + 2 d - 0.2 d^2. - - Based on the continous treatment, a discrete treatment :math:`D` is generated as with a baseline level of - :math:`D=0` and additional levels based on the quantiles of :math:`D_{\\text{cont}}`. The number of levels - is defined by :math:`n_{\\text{levels}}`. Each level is chosen to have the same probability of being selected. - - The potential outcomes are defined as - - .. math:: - - Y(0) &= 210 + 27.4 Z_1 + 13.7 (Z_2 + Z_3 + Z_4) + \\varepsilon_Y - - Y(1) &= \\theta (D_{\\text{cont}}) 1\\{D_{\\text{cont}} > 0\\} + Y(0), - - where :math:`\\varepsilon_Y \\sim \\mathcal{N}(0,5)`. Further, the observed outcome is defined as - - .. math:: - - Y = Y(1) 1\\{D > 0\\} + Y(0) 1\\{D = 0\\}. - - The data is returned as a dictionary with the entries ``x``, ``y``, ``d`` and ``oracle_values``. - - Parameters - ---------- - n_obs : int - The number of observations to simulate. - Default is ``200``. - - n_levels : int - The number of treatment levels. - Default is ``3``. - - linear : bool - Indicates whether the true underlying regression is linear. - Default is ``False``. - - random_state : int - Random seed for reproducibility. - Default is ``42``. - - Returns - ------- - res_dict : dictionary - Dictionary with entries ``x``, ``y``, ``d`` and ``oracle_values``. 
- The oracle values contain the continuous treatment, the level bounds, the potential level, ITE - and the potential outcome without treatment. - - """ - if random_state is not None: - np.random.seed(random_state) - xi = kwargs.get("xi", 0.3) - c = kwargs.get("c", 0.0) - dim_x = kwargs.get("dim_x", 5) - - if not isinstance(n_levels, int): - raise ValueError("n_levels must be an integer.") - if n_levels < 2: - raise ValueError("n_levels must be at least 2.") - - # observed covariates - cov_mat = toeplitz([np.power(c, k) for k in range(dim_x)]) - x = np.random.multivariate_normal( - np.zeros(dim_x), - cov_mat, - size=[ - n_obs, - ], - ) - - def f_reg(w): - res = 210 + 27.4 * w[:, 0] + 13.7 * (w[:, 1] + w[:, 2] + w[:, 3]) - return res - - def f_treatment(w, xi): - res = xi * (-w[:, 0] + 0.5 * w[:, 1] - 0.25 * w[:, 2] - 0.1 * w[:, 3]) - return res - - def treatment_effect(d, scale=15): - return scale * (1 / (1 + np.exp(-d - 1.2 * np.cos(d)))) - 2 - - z_tilde_1 = np.exp(0.5 * x[:, 0]) - z_tilde_2 = 10 + x[:, 1] / (1 + np.exp(x[:, 0])) - z_tilde_3 = (0.6 + x[:, 0] * x[:, 2] / 25) ** 3 - z_tilde_4 = (20 + x[:, 1] + x[:, 3]) ** 2 - - z_tilde = np.column_stack((z_tilde_1, z_tilde_2, z_tilde_3, z_tilde_4, x[:, 4:])) - z = (z_tilde - np.mean(z_tilde, axis=0)) / np.std(z_tilde, axis=0) - - # error terms - var_eps_y = 5 - eps_y = np.random.normal(loc=0, scale=np.sqrt(var_eps_y), size=n_obs) - var_eps_d = 1 - eps_d = np.random.normal(loc=0, scale=np.sqrt(var_eps_d), size=n_obs) - - if linear: - g = f_reg(x) - m = f_treatment(x, xi) - else: - assert not linear - g = f_reg(z) - m = f_treatment(z, xi) - - cont_d = m + eps_d - level_bounds = np.quantile(cont_d, q=np.linspace(0, 1, n_levels + 1)) - potential_level = sum([1.0 * (cont_d >= bound) for bound in level_bounds[1:-1]]) + 1 - eta = np.random.uniform(0, 1, size=n_obs) - d = 1.0 * (eta >= 1 / n_levels) * potential_level - - ite = treatment_effect(cont_d) - y0 = g + eps_y - # only treated for d > 0 compared to the baseline - y = 
ite * (d > 0) + y0 - - oracle_values = { - "cont_d": cont_d, - "level_bounds": level_bounds, - "potential_level": potential_level, - "ite": ite, - "y0": y0, - } - - resul_dict = {"x": x, "y": y, "d": d, "oracle_values": oracle_values} - - return resul_dict diff --git a/doubleml/datasets/__init__.py b/doubleml/datasets/__init__.py new file mode 100644 index 00000000..b09d693d --- /dev/null +++ b/doubleml/datasets/__init__.py @@ -0,0 +1,13 @@ +""" +The :mod:`doubleml.datasets` module implements data generating processes for double machine learning simulations +and provides access to real datasets. +""" + +# Import fetch functions +from .fetch_401K import fetch_401K +from .fetch_bonus import fetch_bonus + +__all__ = [ + "fetch_401K", + "fetch_bonus", +] diff --git a/doubleml/datasets/fetch_401K.py b/doubleml/datasets/fetch_401K.py new file mode 100644 index 00000000..6d99589e --- /dev/null +++ b/doubleml/datasets/fetch_401K.py @@ -0,0 +1,65 @@ +""" +Data set on financial wealth and 401(k) plan participation. +""" + +import pandas as pd + +from doubleml import DoubleMLData + + +def _get_array_alias(): + return ["array", "np.array", "np.ndarray"] + + +def _get_data_frame_alias(): + return ["DataFrame", "pd.DataFrame", "pandas.DataFrame"] + + +def _get_dml_data_alias(): + return ["DoubleMLData"] + + +def fetch_401K(return_type="DoubleMLData", polynomial_features=False): + """ + Data set on financial wealth and 401(k) plan participation. + + Parameters + ---------- + return_type : + If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. + + If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. + polynomial_features : + If ``True`` polynomial features are added (see replication files of Chernozhukov et al. (2018)). + + References + ---------- + Abadie, A. (2003), Semiparametric instrumental variable estimation of treatment response models. Journal of + Econometrics, 113(2): 231-263. 
+ + Chernozhukov, V., Chetverikov, D., Demirer, M., Duflo, E., Hansen, C., Newey, W. and Robins, J. (2018), + Double/debiased machine learning for treatment and structural parameters. The Econometrics Journal, 21: C1-C68. + doi:`10.1111/ectj.12097 <https://doi.org/10.1111/ectj.12097>`_. + """ + _data_frame_alias = _get_data_frame_alias() + _dml_data_alias = _get_dml_data_alias() + + url = "https://github.com/VC2015/DMLonGitHub/raw/master/sipp1991.dta" + raw_data = pd.read_stata(url) + + y_col = "net_tfa" + d_cols = ["e401"] + x_cols = ["age", "inc", "educ", "fsize", "marr", "twoearn", "db", "pira", "hown"] + + data = raw_data.copy() + + if polynomial_features: + raise NotImplementedError("polynomial_features is not implemented yet for fetch_401K.") + + if return_type in _data_frame_alias + _dml_data_alias: + if return_type in _data_frame_alias: + return data + else: + return DoubleMLData(data, y_col, d_cols, x_cols) + else: + raise ValueError("Invalid return_type.") diff --git a/doubleml/datasets/fetch_bonus.py b/doubleml/datasets/fetch_bonus.py new file mode 100644 index 00000000..7d803414 --- /dev/null +++ b/doubleml/datasets/fetch_bonus.py @@ -0,0 +1,98 @@ +""" +Data set on the Pennsylvania Reemployment Bonus experiment. +""" + +import numpy as np +import pandas as pd +from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures + +from doubleml import DoubleMLData + + +def _get_array_alias(): + return ["array", "np.array", "np.ndarray"] + + +def _get_data_frame_alias(): + return ["DataFrame", "pd.DataFrame", "pandas.DataFrame"] + + +def _get_dml_data_alias(): + return ["DoubleMLData"] + + +def fetch_bonus(return_type="DoubleMLData", polynomial_features=False): + """ + Data set on the Pennsylvania Reemployment Bonus experiment. + + Parameters + ---------- + return_type : + If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. + + If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. 
+ polynomial_features : + If ``True`` polynomial features are added (see replication files of Chernozhukov et al. (2018)). + + References + ---------- + Bilias Y. (2000), Sequential Testing of Duration Data: The Case of Pennsylvania 'Reemployment Bonus' Experiment. + Journal of Applied Econometrics, 15(6): 575-594. + + Chernozhukov, V., Chetverikov, D., Demirer, M., Duflo, E., Hansen, C., Newey, W. and Robins, J. (2018), + Double/debiased machine learning for treatment and structural parameters. The Econometrics Journal, 21: C1-C68. + doi:`10.1111/ectj.12097 <https://doi.org/10.1111/ectj.12097>`_. + """ + _data_frame_alias = _get_data_frame_alias() + _dml_data_alias = _get_dml_data_alias() + + url = "https://raw.githubusercontent.com/VC2015/DMLonGitHub/master/penn_jae.dat" + raw_data = pd.read_csv(url, sep=r"\s+") + + ind = (raw_data["tg"] == 0) | (raw_data["tg"] == 4) + data = raw_data.copy()[ind] + data.reset_index(inplace=True) + data["tg"] = data["tg"].replace(4, 1) + data["inuidur1"] = np.log(data["inuidur1"]) + + # variable dep as factor (dummy encoding) + dummy_enc = OneHotEncoder(drop="first", categories="auto").fit(data.loc[:, ["dep"]]) + xx = dummy_enc.transform(data.loc[:, ["dep"]]).toarray() + data["dep1"] = xx[:, 0] + data["dep2"] = xx[:, 1] + + y_col = "inuidur1" + d_cols = ["tg"] + x_cols = [ + "female", + "black", + "othrace", + "dep1", + "dep2", + "q2", + "q3", + "q4", + "q5", + "q6", + "agelt35", + "agegt54", + "durable", + "lusd", + "husd", + ] + + if polynomial_features: + poly = PolynomialFeatures(2, include_bias=False) + data_transf = poly.fit_transform(data[x_cols]) + x_cols = list(poly.get_feature_names_out(x_cols)) + + data_transf = pd.DataFrame(data_transf, columns=x_cols) + data = pd.concat((data[[y_col] + d_cols], data_transf), axis=1, sort=False) + + if return_type in _data_frame_alias + _dml_data_alias: + if return_type in _data_frame_alias: + return data + else: + return DoubleMLData(data, y_col, d_cols, x_cols) + else: + raise ValueError("Invalid return_type.") diff 
--git a/doubleml/did/datasets/dgp_did_SZ2020.py b/doubleml/did/datasets/dgp_did_SZ2020.py index ccfd4a80..af46f4ab 100644 --- a/doubleml/did/datasets/dgp_did_SZ2020.py +++ b/doubleml/did/datasets/dgp_did_SZ2020.py @@ -2,13 +2,14 @@ import pandas as pd from scipy.linalg import toeplitz -from ...data.base_data import DoubleMLData +from ...data.did_data import DoubleMLDIDData from ...data.panel_data import DoubleMLPanelData -from ...utils._aliases import _get_array_alias, _get_data_frame_alias, _get_dml_data_alias +from ...utils._aliases import _get_array_alias, _get_data_frame_alias, _get_dml_did_data_alias, _get_dml_panel_data_alias _array_alias = _get_array_alias() _data_frame_alias = _get_data_frame_alias() -_dml_data_alias = _get_dml_data_alias() +_dml_did_data_alias = _get_dml_did_data_alias() +_dml_panel_data_alias = _get_dml_panel_data_alias() def _generate_features(n_obs, c, dim_x=4): @@ -60,7 +61,7 @@ def _f_ps(w, xi): return res -def make_did_SZ2020(n_obs=500, dgp_type=1, cross_sectional_data=False, return_type="DoubleMLData", **kwargs): +def make_did_SZ2020(n_obs=500, dgp_type=1, cross_sectional_data=False, return_type="DoubleMLDIDData", **kwargs): """ Generates data from a difference-in-differences model used in Sant'Anna and Zhao (2020). The data generating process is defined as follows. For a generic :math:`W=(W_1, W_2, W_3, W_4)^T`, let @@ -130,7 +131,7 @@ def make_did_SZ2020(n_obs=500, dgp_type=1, cross_sectional_data=False, return_ty cross_sectional_data : Indicates whether the setting is uses cross-sectional or panel data. Default value is ``False``. return_type : - If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. + If ``'DoubleMLDIDData'`` or ``DoubleMLDIDData``, returns a ``DoubleMLDIDData`` object. If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. 
@@ -181,14 +182,14 @@ def make_did_SZ2020(n_obs=500, dgp_type=1, cross_sectional_data=False, return_ty if return_type in _array_alias: return z, y, d, None - elif return_type in _data_frame_alias + _dml_data_alias: + elif return_type in _data_frame_alias + _dml_did_data_alias: z_cols = [f"Z{i + 1}" for i in np.arange(dim_x)] data = pd.DataFrame(np.column_stack((z, y, d)), columns=z_cols + ["y", "d"]) if return_type in _data_frame_alias: return data else: - return DoubleMLData(data, "y", "d", z_cols) - elif return_type == "DoubleMLPanelData": + return DoubleMLDIDData(data, y_col="y", d_cols="d", x_cols=z_cols) + elif return_type in _dml_panel_data_alias: z_cols = [f"Z{i + 1}" for i in np.arange(dim_x)] df0 = ( pd.DataFrame( @@ -216,7 +217,7 @@ def make_did_SZ2020(n_obs=500, dgp_type=1, cross_sectional_data=False, return_ty ) df = pd.concat([df0, df1], axis=0) - return DoubleMLPanelData(df, "y", "d", t_col="t", id_col="id", x_cols=z_cols) + return DoubleMLPanelData(df, y_col="y", d_cols="d", t_col="t", id_col="id", x_cols=z_cols) else: raise ValueError("Invalid return_type.") @@ -227,12 +228,13 @@ def make_did_SZ2020(n_obs=500, dgp_type=1, cross_sectional_data=False, return_ty if return_type in _array_alias: return z, y, d, t - elif return_type in _data_frame_alias + _dml_data_alias: + elif return_type in _data_frame_alias + _dml_did_data_alias: z_cols = [f"Z{i + 1}" for i in np.arange(dim_x)] data = pd.DataFrame(np.column_stack((z, y, d, t)), columns=z_cols + ["y", "d", "t"]) if return_type in _data_frame_alias: return data - else: - return DoubleMLData(data, "y", "d", z_cols, t_col="t") + elif return_type in _dml_did_data_alias: + return DoubleMLDIDData(data, y_col="y", d_cols="d", x_cols=z_cols, t_col="t") else: raise ValueError("Invalid return_type.") + return None diff --git a/doubleml/did/did.py b/doubleml/did/did.py index 56bfe79c..1e56ccd8 100644 --- a/doubleml/did/did.py +++ b/doubleml/did/did.py @@ -4,7 +4,7 @@ from sklearn.utils import check_X_y from 
sklearn.utils.multiclass import type_of_target -from doubleml.data.base_data import DoubleMLData +from doubleml.data.did_data import DoubleMLDIDData from doubleml.double_ml import DoubleML from doubleml.double_ml_score_mixins import LinearScoreMixin from doubleml.utils._checks import _check_finite_predictions, _check_is_propensity, _check_score, _check_trimming @@ -17,8 +17,8 @@ class DoubleMLDID(LinearScoreMixin, DoubleML): Parameters ---------- - obj_dml_data : :class:`DoubleMLData` object - The :class:`DoubleMLData` object providing the data and specifying the variables for the causal model. + obj_dml_data : :class:`DoubleMLDIDData` object + The :class:`DoubleMLDIDData` object providing the data and specifying the variables for the causal model. ml_g : estimator implementing ``fit()`` and ``predict()`` A machine learner implementing ``fit()`` and ``predict()`` methods (e.g. @@ -66,13 +66,13 @@ class DoubleMLDID(LinearScoreMixin, DoubleML): -------- >>> import numpy as np >>> import doubleml as dml - >>> from doubleml.datasets import make_did_SZ2020 + >>> from doubleml.did.datasets import make_did_SZ2020 >>> from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier >>> np.random.seed(42) >>> ml_g = RandomForestRegressor(n_estimators=100, max_depth=5, min_samples_leaf=5) >>> ml_m = RandomForestClassifier(n_estimators=100, max_depth=5, min_samples_leaf=5) >>> data = make_did_SZ2020(n_obs=500, return_type='DataFrame') - >>> obj_dml_data = dml.DoubleMLData(data, 'y', 'd') + >>> obj_dml_data = dml.DoubleMLDIDData(data, 'y', 'd') >>> dml_did_obj = dml.DoubleMLDID(obj_dml_data, ml_g, ml_m) >>> dml_did_obj.fit().summary coef std err t P>|t| 2.5 % 97.5 % @@ -177,9 +177,9 @@ def _initialize_ml_nuisance_params(self): self._params = {learner: {key: [None] * self.n_rep for key in self._dml_data.d_cols} for learner in valid_learner} def _check_data(self, obj_dml_data): - if not isinstance(obj_dml_data, DoubleMLData): + if not isinstance(obj_dml_data, 
DoubleMLDIDData): raise TypeError( - "For repeated outcomes the data must be of DoubleMLData type. " + "For repeated outcomes the data must be of DoubleMLDIDData type. " f"{str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed." ) if obj_dml_data.z_cols is not None: diff --git a/doubleml/did/did_cs.py b/doubleml/did/did_cs.py index aa97996f..7cba006e 100644 --- a/doubleml/did/did_cs.py +++ b/doubleml/did/did_cs.py @@ -4,7 +4,7 @@ from sklearn.utils import check_X_y from sklearn.utils.multiclass import type_of_target -from doubleml.data.base_data import DoubleMLData +from doubleml.data.did_data import DoubleMLDIDData from doubleml.double_ml import DoubleML from doubleml.double_ml_score_mixins import LinearScoreMixin from doubleml.utils._checks import _check_finite_predictions, _check_is_propensity, _check_score, _check_trimming @@ -17,8 +17,8 @@ class DoubleMLDIDCS(LinearScoreMixin, DoubleML): Parameters ---------- - obj_dml_data : :class:`DoubleMLData` object - The :class:`DoubleMLData` object providing the data and specifying the variables for the causal model. + obj_dml_data : :class:`DoubleMLDIDData` object + The :class:`DoubleMLDIDData` object providing the data and specifying the variables for the causal model. ml_g : estimator implementing ``fit()`` and ``predict()`` A machine learner implementing ``fit()`` and ``predict()`` methods (e.g. @@ -63,16 +63,15 @@ class DoubleMLDIDCS(LinearScoreMixin, DoubleML): Default is ``True``. 
Examples - -------- - >>> import numpy as np + -------- >>> import numpy as np >>> import doubleml as dml - >>> from doubleml.datasets import make_did_SZ2020 + >>> from doubleml.did.datasets import make_did_SZ2020 >>> from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier >>> np.random.seed(42) >>> ml_g = RandomForestRegressor(n_estimators=100, max_depth=5, min_samples_leaf=5) >>> ml_m = RandomForestClassifier(n_estimators=100, max_depth=5, min_samples_leaf=5) >>> data = make_did_SZ2020(n_obs=500, cross_sectional_data=True, return_type='DataFrame') - >>> obj_dml_data = dml.DoubleMLData(data, 'y', 'd', t_col='t') + >>> obj_dml_data = dml.DoubleMLDIDData(data, 'y', 'd', t_col='t') >>> dml_did_obj = dml.DoubleMLDIDCS(obj_dml_data, ml_g, ml_m) >>> dml_did_obj.fit().summary coef std err t P>|t| 2.5 % 97.5 % @@ -178,9 +177,9 @@ def _initialize_ml_nuisance_params(self): self._params = {learner: {key: [None] * self.n_rep for key in self._dml_data.d_cols} for learner in valid_learner} def _check_data(self, obj_dml_data): - if not isinstance(obj_dml_data, DoubleMLData): + if not isinstance(obj_dml_data, DoubleMLDIDData): raise TypeError( - "For repeated cross sections the data must be of DoubleMLData type. " + "For repeated cross sections the data must be of DoubleMLDIDData type. " f"{str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed." ) if obj_dml_data.z_cols is not None: diff --git a/doubleml/did/tests/test_datasets.py b/doubleml/did/tests/test_datasets.py index 54eb4074..8e079e9a 100644 --- a/doubleml/did/tests/test_datasets.py +++ b/doubleml/did/tests/test_datasets.py @@ -2,7 +2,7 @@ import pandas as pd import pytest -from doubleml import DoubleMLData +from doubleml import DoubleMLDIDData from doubleml.did.datasets import make_did_CS2021, make_did_cs_CS2021, make_did_SZ2020 msg_inv_return_type = "Invalid return_type." 
@@ -21,8 +21,8 @@ def dgp_type(request): @pytest.mark.ci def test_make_did_SZ2020_return_types(cross_sectional, dgp_type): np.random.seed(3141) - res = make_did_SZ2020(n_obs=100, dgp_type=dgp_type, cross_sectional_data=cross_sectional, return_type=DoubleMLData) - assert isinstance(res, DoubleMLData) + res = make_did_SZ2020(n_obs=100, dgp_type=dgp_type, cross_sectional_data=cross_sectional, return_type=DoubleMLDIDData) + assert isinstance(res, DoubleMLDIDData) res = make_did_SZ2020(n_obs=100, dgp_type=dgp_type, cross_sectional_data=cross_sectional, return_type=pd.DataFrame) assert isinstance(res, pd.DataFrame) if cross_sectional: diff --git a/doubleml/did/tests/test_did.py b/doubleml/did/tests/test_did.py index 90d53a95..79feb110 100644 --- a/doubleml/did/tests/test_did.py +++ b/doubleml/did/tests/test_did.py @@ -57,7 +57,7 @@ def dml_did_fixture(generate_data_did, learner, score, in_sample_normalization, np.random.seed(3141) n_obs = len(y) all_smpls = draw_smpls(n_obs, n_folds, n_rep=1, groups=d) - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d) + obj_dml_data = dml.DoubleMLDIDData.from_arrays(x, y, d) np.random.seed(3141) dml_did_obj = dml.DoubleMLDID( @@ -182,7 +182,7 @@ def test_dml_did_experimental(generate_data_did, in_sample_normalization, learne ml_m = clone(learner[1]) np.random.seed(3141) - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d) + obj_dml_data = dml.DoubleMLDIDData.from_arrays(x, y, d) np.random.seed(3141) dml_did_obj_without_ml_m = dml.DoubleMLDID( diff --git a/doubleml/did/tests/test_did_binary_exceptions.py b/doubleml/did/tests/test_did_binary_exceptions.py index c7aa2395..78c09a94 100644 --- a/doubleml/did/tests/test_did_binary_exceptions.py +++ b/doubleml/did/tests/test_did_binary_exceptions.py @@ -85,7 +85,7 @@ def test_check_data_exceptions(): # Test 1: Data has to be DoubleMLPanelData invalid_data_types = [ - dml.data.DoubleMLData(df, y_col="Col_0", d_cols="Col_1"), + dml.data.DoubleMLDIDData(df, y_col="Col_0", d_cols="Col_1"), ] 
for invalid_data in invalid_data_types: diff --git a/doubleml/did/tests/test_did_binary_tune.py b/doubleml/did/tests/test_did_binary_tune.py index a817223f..0962aa5b 100644 --- a/doubleml/did/tests/test_did_binary_tune.py +++ b/doubleml/did/tests/test_did_binary_tune.py @@ -64,7 +64,7 @@ def dml_did_fixture(generate_data_did_binary, learner_g, learner_m, score, in_sa n_obs = df_panel.shape[0] all_smpls = draw_smpls(n_obs, n_folds, n_rep=1, groups=df_panel["d"]) - obj_dml_data = dml.DoubleMLData(df_panel, y_col="y", d_cols="d", x_cols=["Z1", "Z2", "Z3", "Z4"]) + obj_dml_data = dml.DoubleMLDIDData(df_panel, y_col="y", d_cols="d", x_cols=["Z1", "Z2", "Z3", "Z4"]) # Set machine learning methods for m & g ml_g = clone(learner_g) diff --git a/doubleml/did/tests/test_did_binary_vs_did_panel.py b/doubleml/did/tests/test_did_binary_vs_did_panel.py index 426b413c..2eddccaf 100644 --- a/doubleml/did/tests/test_did_binary_vs_did_panel.py +++ b/doubleml/did/tests/test_did_binary_vs_did_panel.py @@ -79,7 +79,7 @@ def dml_did_binary_vs_did_fixture(time_type, learner, score, in_sample_normaliza dml_did_binary_obj.fit() df_wide = dml_did_binary_obj.data_subset.copy() - dml_data = dml.data.DoubleMLData(df_wide, y_col="y_diff", d_cols="G_indicator", x_cols=["Z1", "Z2", "Z3", "Z4"]) + dml_data = dml.data.DoubleMLDIDData(df_wide, y_col="y_diff", d_cols="G_indicator", x_cols=["Z1", "Z2", "Z3", "Z4"]) dml_did_obj = dml.DoubleMLDID( dml_data, **dml_args, diff --git a/doubleml/did/tests/test_did_binary_vs_did_two_period.py b/doubleml/did/tests/test_did_binary_vs_did_two_period.py index 0db2a752..74575664 100644 --- a/doubleml/did/tests/test_did_binary_vs_did_two_period.py +++ b/doubleml/did/tests/test_did_binary_vs_did_two_period.py @@ -56,7 +56,7 @@ def dml_did_binary_vs_did_fixture(generate_data_did_binary, learner, score, in_s n_obs = df_panel.shape[0] all_smpls = draw_smpls(n_obs, n_folds) - obj_dml_data = dml.DoubleMLData(df_panel, y_col="y", d_cols="d", x_cols=["Z1", "Z2", "Z3", 
"Z4"]) + obj_dml_data = dml.DoubleMLDIDData(df_panel, y_col="y", d_cols="d", x_cols=["Z1", "Z2", "Z3", "Z4"]) # Set machine learning methods for m & g ml_g = clone(learner[0]) diff --git a/doubleml/did/tests/test_did_cs.py b/doubleml/did/tests/test_did_cs.py index ae633588..bc8e2da6 100644 --- a/doubleml/did/tests/test_did_cs.py +++ b/doubleml/did/tests/test_did_cs.py @@ -59,7 +59,7 @@ def dml_did_cs_fixture(generate_data_did_cs, learner, score, in_sample_normaliza n_obs = len(y) all_smpls = draw_smpls(n_obs, n_folds, n_rep=1, groups=d + 2 * t) - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d, t=t) + obj_dml_data = dml.DoubleMLDIDData.from_arrays(x, y, d, t=t) np.random.seed(3141) dml_did_cs_obj = dml.DoubleMLDIDCS( @@ -185,7 +185,7 @@ def test_dml_did_cs_experimental(generate_data_did_cs, in_sample_normalization, ml_m = clone(learner[1]) np.random.seed(3141) - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d, t=t) + obj_dml_data = dml.DoubleMLDIDData.from_arrays(x, y, d, t=t) np.random.seed(3141) dml_did_obj_without_ml_m = dml.DoubleMLDIDCS( diff --git a/doubleml/did/tests/test_did_cs_binary_exceptions.py b/doubleml/did/tests/test_did_cs_binary_exceptions.py index b506da2d..e8d33939 100644 --- a/doubleml/did/tests/test_did_cs_binary_exceptions.py +++ b/doubleml/did/tests/test_did_cs_binary_exceptions.py @@ -85,7 +85,7 @@ def test_check_data_exceptions(): # Test 1: Data has to be DoubleMLPanelData invalid_data_types = [ - dml.data.DoubleMLData(df, y_col="Col_0", d_cols="Col_1"), + dml.data.DoubleMLDIDData(df, y_col="Col_0", d_cols="Col_1"), ] for invalid_data in invalid_data_types: diff --git a/doubleml/did/tests/test_did_cs_binary_tune.py b/doubleml/did/tests/test_did_cs_binary_tune.py index 0bd2c6ab..59db23dd 100644 --- a/doubleml/did/tests/test_did_cs_binary_tune.py +++ b/doubleml/did/tests/test_did_cs_binary_tune.py @@ -63,7 +63,7 @@ def dml_did_fixture(generate_data_did_binary, learner_g, learner_m, score, in_sa dml_panel_data = 
dml.data.DoubleMLPanelData( df, y_col="y", d_cols="d", id_col="id", t_col="t", x_cols=["Z1", "Z2", "Z3", "Z4"] ) - obj_dml_data = dml.DoubleMLData(df, y_col="y", d_cols="d", t_col="t", x_cols=["Z1", "Z2", "Z3", "Z4"]) + obj_dml_data = dml.DoubleMLDIDData(df, y_col="y", d_cols="d", t_col="t", x_cols=["Z1", "Z2", "Z3", "Z4"]) n_obs = df.shape[0] strata = df["d"] + 2 * df["t"] # only valid since it values are binary diff --git a/doubleml/did/tests/test_did_cs_binary_vs_did_cs_panel.py b/doubleml/did/tests/test_did_cs_binary_vs_did_cs_panel.py index 8fab2615..da7db085 100644 --- a/doubleml/did/tests/test_did_cs_binary_vs_did_cs_panel.py +++ b/doubleml/did/tests/test_did_cs_binary_vs_did_cs_panel.py @@ -76,7 +76,7 @@ def dml_did_binary_vs_did_fixture(time_type, learner, score, in_sample_normaliza dml_did_binary_obj.fit() df_subset = dml_did_binary_obj.data_subset.copy() - dml_data = dml.data.DoubleMLData( + dml_data = dml.data.DoubleMLDIDData( df_subset, y_col="y", d_cols="G_indicator", x_cols=["Z1", "Z2", "Z3", "Z4"], t_col="t_indicator" ) dml_did_obj = dml.DoubleMLDIDCS( diff --git a/doubleml/did/tests/test_did_cs_binary_vs_did_cs_two_period.py b/doubleml/did/tests/test_did_cs_binary_vs_did_cs_two_period.py index 73e6b827..b9e267ce 100644 --- a/doubleml/did/tests/test_did_cs_binary_vs_did_cs_two_period.py +++ b/doubleml/did/tests/test_did_cs_binary_vs_did_cs_two_period.py @@ -55,7 +55,7 @@ def dml_did_cs_binary_vs_did_cs_fixture(generate_data_did_binary, learner, score dml_panel_data = dml.data.DoubleMLPanelData( df, y_col="y", d_cols="d", id_col="id", t_col="t", x_cols=["Z1", "Z2", "Z3", "Z4"] ) - obj_dml_data = dml.DoubleMLData(df, y_col="y", d_cols="d", t_col="t", x_cols=["Z1", "Z2", "Z3", "Z4"]) + obj_dml_data = dml.DoubleMLDIDData(df, y_col="y", d_cols="d", t_col="t", x_cols=["Z1", "Z2", "Z3", "Z4"]) n_obs = df.shape[0] all_smpls = draw_smpls(n_obs, n_folds) diff --git a/doubleml/did/tests/test_did_cs_external_predictions.py 
b/doubleml/did/tests/test_did_cs_external_predictions.py index 2b28ac8a..1c5f6640 100644 --- a/doubleml/did/tests/test_did_cs_external_predictions.py +++ b/doubleml/did/tests/test_did_cs_external_predictions.py @@ -24,7 +24,7 @@ def n_rep(request): @pytest.fixture(scope="module") def doubleml_didcs_fixture(did_score, n_rep): ext_predictions = {"d": {}} - dml_data = make_did_SZ2020(n_obs=500, cross_sectional_data=True, return_type="DoubleMLData") + dml_data = make_did_SZ2020(n_obs=500, cross_sectional_data=True, return_type="DoubleMLDIDData") all_smpls = draw_smpls(len(dml_data.y), 5, n_rep=n_rep, groups=dml_data.d) kwargs = {"obj_dml_data": dml_data, "score": did_score, "n_rep": n_rep, "n_folds": 5, "draw_sample_splitting": False} dml_did_cs = DoubleMLDIDCS(ml_g=LinearRegression(), ml_m=LogisticRegression(), **kwargs) diff --git a/doubleml/did/tests/test_did_cs_tune.py b/doubleml/did/tests/test_did_cs_tune.py index 5ec33e82..50415937 100644 --- a/doubleml/did/tests/test_did_cs_tune.py +++ b/doubleml/did/tests/test_did_cs_tune.py @@ -67,7 +67,7 @@ def dml_did_cs_fixture(generate_data_did_cs, learner_g, learner_m, score, in_sam all_smpls = draw_smpls(n_obs, n_folds, n_rep=1, groups=d + 2 * t) np.random.seed(3141) - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d, t=t) + obj_dml_data = dml.DoubleMLDIDData.from_arrays(x, y, d, t=t) dml_did_cs_obj = dml.DoubleMLDIDCS( obj_dml_data, ml_g, diff --git a/doubleml/did/tests/test_did_external_predictions.py b/doubleml/did/tests/test_did_external_predictions.py index 7234be8e..194db374 100644 --- a/doubleml/did/tests/test_did_external_predictions.py +++ b/doubleml/did/tests/test_did_external_predictions.py @@ -24,7 +24,7 @@ def n_rep(request): @pytest.fixture(scope="module") def doubleml_did_fixture(did_score, n_rep): ext_predictions = {"d": {}} - dml_data = make_did_SZ2020(n_obs=500, return_type="DoubleMLData") + dml_data = make_did_SZ2020(n_obs=500, return_type="DoubleMLDIDData") all_smpls = draw_smpls(len(dml_data.y), 
5, n_rep=n_rep, groups=dml_data.d) kwargs = {"obj_dml_data": dml_data, "score": did_score, "n_rep": n_rep, "draw_sample_splitting": False} dml_did = DoubleMLDID(ml_g=LinearRegression(), ml_m=LogisticRegression(), **kwargs) diff --git a/doubleml/did/tests/test_did_tune.py b/doubleml/did/tests/test_did_tune.py index 16ec2ee8..25899301 100644 --- a/doubleml/did/tests/test_did_tune.py +++ b/doubleml/did/tests/test_did_tune.py @@ -65,7 +65,7 @@ def dml_did_fixture(generate_data_did, learner_g, learner_m, score, in_sample_no ml_m = clone(learner_m) np.random.seed(3141) - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d) + obj_dml_data = dml.DoubleMLDIDData.from_arrays(x, y, d) dml_did_obj = dml.DoubleMLDID( obj_dml_data, ml_g, diff --git a/doubleml/did/tests/test_return_types.py b/doubleml/did/tests/test_return_types.py index 37105c3e..531a9706 100644 --- a/doubleml/did/tests/test_return_types.py +++ b/doubleml/did/tests/test_return_types.py @@ -3,7 +3,7 @@ import pytest from sklearn.linear_model import Lasso, LogisticRegression -from doubleml.data import DoubleMLData, DoubleMLPanelData +from doubleml.data import DoubleMLDIDData, DoubleMLPanelData from doubleml.did import DoubleMLDID, DoubleMLDIDBinary, DoubleMLDIDCS, DoubleMLDIDCSBinary from doubleml.did.datasets import make_did_CS2021, make_did_cs_CS2021, make_did_SZ2020 from doubleml.utils._check_return_types import ( @@ -37,8 +37,8 @@ (x, y, d, t) = make_did_SZ2020(n_obs=N_OBS, cross_sectional_data=True, return_type="array") binary_outcome = np.random.binomial(n=1, p=0.5, size=N_OBS) -datasets["did_binary_outcome"] = DoubleMLData.from_arrays(x, binary_outcome, d) -datasets["did_cs_binary_outcome"] = DoubleMLData.from_arrays(x, binary_outcome, d, t=t) +datasets["did_binary_outcome"] = DoubleMLDIDData.from_arrays(x, binary_outcome, d) +datasets["did_cs_binary_outcome"] = DoubleMLDIDData.from_arrays(x, binary_outcome, d, t=t) dml_objs = [ (DoubleMLDID(datasets["did"], Lasso(), LogisticRegression(), **dml_args), 
DoubleMLDID), diff --git a/doubleml/double_ml.py b/doubleml/double_ml.py index 694968bc..4fbf0bd3 100644 --- a/doubleml/double_ml.py +++ b/doubleml/double_ml.py @@ -7,19 +7,19 @@ from scipy.stats import norm from sklearn.base import is_classifier, is_regressor -from doubleml.data import DoubleMLClusterData, DoubleMLPanelData +from doubleml.data import DoubleMLDIDData, DoubleMLPanelData, DoubleMLRDDData, DoubleMLSSMData from doubleml.data.base_data import DoubleMLBaseData from doubleml.double_ml_framework import DoubleMLFramework -from doubleml.utils._checks import _check_external_predictions, _check_sample_splitting +from doubleml.double_ml_sampling_mixins import SampleSplittingMixin +from doubleml.utils._checks import _check_external_predictions from doubleml.utils._estimation import _aggregate_coefs_and_ses, _rmse, _set_external_predictions, _var_est from doubleml.utils._sensitivity import _compute_sensitivity_bias from doubleml.utils.gain_statistics import gain_statistics -from doubleml.utils.resampling import DoubleMLClusterResampling, DoubleMLResampling -_implemented_data_backends = ["DoubleMLData", "DoubleMLClusterData"] +_implemented_data_backends = ["DoubleMLData", "DoubleMLClusterData", "DoubleMLDIDData", "DoubleMLSSMData", "DoubleMLRDDData"] -class DoubleML(ABC): +class DoubleML(SampleSplittingMixin, ABC): """Double Machine Learning.""" def __init__(self, obj_dml_data, n_folds, n_rep, score, draw_sample_splitting): @@ -30,13 +30,22 @@ def __init__(self, obj_dml_data, n_folds, n_rep, score, draw_sample_splitting): f"{str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed." 
) self._is_cluster_data = False - if isinstance(obj_dml_data, DoubleMLClusterData): + if obj_dml_data.is_cluster_data: if obj_dml_data.n_cluster_vars > 2: raise NotImplementedError("Multi-way (n_ways > 2) clustering not yet implemented.") self._is_cluster_data = True self._is_panel_data = False if isinstance(obj_dml_data, DoubleMLPanelData): self._is_panel_data = True + self._is_did_data = False + if isinstance(obj_dml_data, DoubleMLDIDData): + self._is_did_data = True + self._is_ssm_data = False + if isinstance(obj_dml_data, DoubleMLSSMData): + self._is_ssm_data = True + self._is_rdd_data = False + if isinstance(obj_dml_data, DoubleMLRDDData): + self._is_rdd_data = True self._dml_data = obj_dml_data self._n_obs = self._dml_data.n_obs @@ -101,10 +110,8 @@ def __init__(self, obj_dml_data, n_folds, n_rep, score, draw_sample_splitting): self._n_obs_sample_splitting = self.n_obs if draw_sample_splitting: self.draw_sample_splitting() - self._score_dim = (self._dml_data.n_obs, self.n_rep, self._dml_data.n_coefs) - # initialize arrays according to obj_dml_data and the resampling settings - self._initialize_arrays() + self._initialize_dml_model() # initialize instance attributes which are later used for iterating self._i_rep = None @@ -1192,7 +1199,7 @@ def evaluate_learners(self, learners=None, metric=_rmse): >>> import numpy as np >>> import doubleml as dml >>> from sklearn.metrics import mean_absolute_error - >>> from doubleml.datasets import make_irm_data + >>> from doubleml.irm.datasets import make_irm_data >>> from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier >>> np.random.seed(3141) >>> ml_g = RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2) @@ -1238,103 +1245,11 @@ def evaluate_learners(self, learners=None, metric=_rmse): f"The learners have to be a subset of {str(self.params_names)}. Learners {str(learners)} provided." 
) - def draw_sample_splitting(self): - """ - Draw sample splitting for DoubleML models. - - The samples are drawn according to the attributes - ``n_folds`` and ``n_rep``. - - Returns - ------- - self : object - """ - if self._is_cluster_data: - obj_dml_resampling = DoubleMLClusterResampling( - n_folds=self._n_folds_per_cluster, - n_rep=self.n_rep, - n_obs=self._n_obs_sample_splitting, - n_cluster_vars=self._dml_data.n_cluster_vars, - cluster_vars=self._dml_data.cluster_vars, - ) - self._smpls, self._smpls_cluster = obj_dml_resampling.split_samples() - else: - obj_dml_resampling = DoubleMLResampling( - n_folds=self.n_folds, n_rep=self.n_rep, n_obs=self._n_obs_sample_splitting, stratify=self._strata - ) - self._smpls = obj_dml_resampling.split_samples() - - return self - - def set_sample_splitting(self, all_smpls, all_smpls_cluster=None): - """ - Set the sample splitting for DoubleML models. - - The attributes ``n_folds`` and ``n_rep`` are derived from the provided partition. - - Parameters - ---------- - all_smpls : list or tuple - If nested list of lists of tuples: - The outer list needs to provide an entry per repeated sample splitting (length of list is set as - ``n_rep``). - The inner list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as - ``n_folds``). test_ind must form a partition for each inner list. - If list of tuples: - The list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as - ``n_folds``). test_ind must form a partition. ``n_rep=1`` is always set. - If tuple: - Must be a tuple with two elements train_ind and test_ind. Only viable option is to set - train_ind and test_ind to np.arange(n_obs), which corresponds to no sample splitting. - ``n_folds=1`` and ``n_rep=1`` is always set. - - all_smpls_cluster : list or None - Nested list or ``None``. The first level of nesting corresponds to the number of repetitions. The second level - of nesting corresponds to the number of folds. 
The third level of nesting contains a tuple of training and - testing lists. Both training and testing contain an array for each cluster variable, which form a partition of - the clusters. - Default is ``None``. - - Returns - ------- - self : object - - Examples - -------- - >>> import numpy as np - >>> import doubleml as dml - >>> from doubleml.datasets import make_plr_CCDDHNR2018 - >>> from sklearn.ensemble import RandomForestRegressor - >>> from sklearn.base import clone - >>> np.random.seed(3141) - >>> learner = RandomForestRegressor(max_depth=2, n_estimators=10) - >>> ml_g = learner - >>> ml_m = learner - >>> obj_dml_data = make_plr_CCDDHNR2018(n_obs=10, alpha=0.5) - >>> dml_plr_obj = dml.DoubleMLPLR(obj_dml_data, ml_g, ml_m) - >>> # simple sample splitting with two folds and without cross-fitting - >>> smpls = ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) - >>> dml_plr_obj.set_sample_splitting(smpls) - >>> # sample splitting with two folds and cross-fitting - >>> smpls = [([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]), - >>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])] - >>> dml_plr_obj.set_sample_splitting(smpls) - >>> # sample splitting with two folds and repeated cross-fitting with n_rep = 2 - >>> smpls = [[([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]), - >>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])], - >>> [([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]), - >>> ([1, 3, 5, 7, 9], [0, 2, 4, 6, 8])]] - >>> dml_plr_obj.set_sample_splitting(smpls) - """ - self._smpls, self._smpls_cluster, self._n_rep, self._n_folds = _check_sample_splitting( - all_smpls, all_smpls_cluster, self._dml_data, self._is_cluster_data, n_obs=self._n_obs_sample_splitting - ) - - # set sample splitting can update the number of repetitions + def _initialize_dml_model(self): self._score_dim = (self._score_dim[0], self._n_rep, self._score_dim[2]) self._initialize_arrays() - self._initialize_ml_nuisance_params() - + if self._learner: # for calling in __init__ of subclasses, we need to check if _learner is already set + 
self._initialize_ml_nuisance_params() return self def _est_causal_pars(self, psi_elements): diff --git a/doubleml/double_ml_sampling_mixins.py b/doubleml/double_ml_sampling_mixins.py new file mode 100644 index 00000000..bd9d0c13 --- /dev/null +++ b/doubleml/double_ml_sampling_mixins.py @@ -0,0 +1,122 @@ +from abc import abstractmethod + +from doubleml.utils._checks import _check_sample_splitting +from doubleml.utils.resampling import DoubleMLClusterResampling, DoubleMLResampling + + +class SampleSplittingMixin: + """ + Mixin class implementing sample splitting for DoubleML models. + + Notes + ----- + The mixin class :class:`SampleSplittingMixin` implements the sample splitting procedure for DoubleML models. + The sample splitting is drawn according to the attributes ``n_folds`` and ``n_rep``. + If the data is clustered, the sample splitting is drawn such that clusters are not split across folds. + For details, see the chapter on + `sample splitting `_ in the DoubleML user guide. + """ + + def draw_sample_splitting(self): + """ + Draw sample splitting for DoubleML models. + + The samples are drawn according to the attributes + ``n_folds`` and ``n_rep``. + + Returns + ------- + self : object + """ + if self._is_cluster_data: + obj_dml_resampling = DoubleMLClusterResampling( + n_folds=self._n_folds_per_cluster, + n_rep=self.n_rep, + n_obs=self._n_obs_sample_splitting, + n_cluster_vars=self._dml_data.n_cluster_vars, + cluster_vars=self._dml_data.cluster_vars, + ) + self._smpls, self._smpls_cluster = obj_dml_resampling.split_samples() + else: + obj_dml_resampling = DoubleMLResampling( + n_folds=self.n_folds, n_rep=self.n_rep, n_obs=self._n_obs_sample_splitting, stratify=self._strata + ) + self._smpls = obj_dml_resampling.split_samples() + + return self + + def set_sample_splitting(self, all_smpls, all_smpls_cluster=None): + """ + Set the sample splitting for DoubleML models. + + The attributes ``n_folds`` and ``n_rep`` are derived from the provided partition. 
+ + Parameters + ---------- + all_smpls : list or tuple + If nested list of lists of tuples: + The outer list needs to provide an entry per repeated sample splitting (length of list is set as + ``n_rep``). + The inner list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as + ``n_folds``). test_ind must form a partition for each inner list. + If list of tuples: + The list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as + ``n_folds``). test_ind must form a partition. ``n_rep=1`` is always set. + If tuple: + Must be a tuple with two elements train_ind and test_ind. Only viable option is to set + train_ind and test_ind to np.arange(n_obs), which corresponds to no sample splitting. + ``n_folds=1`` and ``n_rep=1`` is always set. + + all_smpls_cluster : list or None + Nested list or ``None``. The first level of nesting corresponds to the number of repetitions. The second level + of nesting corresponds to the number of folds. The third level of nesting contains a tuple of training and + testing lists. Both training and testing contain an array for each cluster variable, which form a partition of + the clusters. + Default is ``None``. 
+ + Returns + ------- + self : object + + Examples + -------- + >>> import numpy as np + >>> import doubleml as dml + >>> from doubleml.plm.datasets import make_plr_CCDDHNR2018 + >>> from sklearn.ensemble import RandomForestRegressor + >>> from sklearn.base import clone + >>> np.random.seed(3141) + >>> learner = RandomForestRegressor(max_depth=2, n_estimators=10) + >>> ml_g = learner + >>> ml_m = learner + >>> obj_dml_data = make_plr_CCDDHNR2018(n_obs=10, alpha=0.5) + >>> dml_plr_obj = dml.DoubleMLPLR(obj_dml_data, ml_g, ml_m) + >>> # simple sample splitting with two folds and without cross-fitting + >>> smpls = ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + >>> dml_plr_obj.set_sample_splitting(smpls) + >>> # sample splitting with two folds and cross-fitting + >>> smpls = [([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]), + >>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])] + >>> dml_plr_obj.set_sample_splitting(smpls) + >>> # sample splitting with two folds and repeated cross-fitting with n_rep = 2 + >>> smpls = [[([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]), + >>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])], + >>> [([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]), + >>> ([1, 3, 5, 7, 9], [0, 2, 4, 6, 8])]] + >>> dml_plr_obj.set_sample_splitting(smpls) + """ + self._smpls, self._smpls_cluster, self._n_rep, self._n_folds = _check_sample_splitting( + all_smpls, all_smpls_cluster, self._dml_data, self._is_cluster_data, n_obs=self._n_obs_sample_splitting + ) + + self._initialize_dml_model() + + return self + + @abstractmethod + def _initialize_dml_model(self): + """ + Set sample splitting for DoubleML models. Can update the number of repetitions. + Updates model dimensions to (n_folds, n_rep). + This method needs to be implemented in the child class. 
+ """ diff --git a/doubleml/irm/apo.py b/doubleml/irm/apo.py index 9fcad876..0de311bc 100644 --- a/doubleml/irm/apo.py +++ b/doubleml/irm/apo.py @@ -102,6 +102,7 @@ def __init__( self._treated = self._dml_data.d == self._treatment_level self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data valid_scores = ["APO"] _check_score(self.score, valid_scores, allow_callable=False) diff --git a/doubleml/irm/apos.py b/doubleml/irm/apos.py index 8099342a..5a6d41fc 100644 --- a/doubleml/irm/apos.py +++ b/doubleml/irm/apos.py @@ -6,18 +6,18 @@ from joblib import Parallel, delayed from sklearn.base import clone -from doubleml.data import DoubleMLClusterData, DoubleMLData +from doubleml.data import DoubleMLData from doubleml.double_ml import DoubleML from doubleml.double_ml_framework import concat +from doubleml.double_ml_sampling_mixins import SampleSplittingMixin from doubleml.irm.apo import DoubleMLAPO -from doubleml.utils._checks import _check_sample_splitting, _check_score, _check_trimming, _check_weights +from doubleml.utils._checks import _check_score, _check_trimming, _check_weights from doubleml.utils._descriptive import generate_summary from doubleml.utils._sensitivity import _compute_sensitivity_bias from doubleml.utils.gain_statistics import gain_statistics -from doubleml.utils.resampling import DoubleMLResampling -class DoubleMLAPOS: +class DoubleMLAPOS(SampleSplittingMixin): """Double machine learning for interactive regression models with multiple discrete treatments.""" def __init__( @@ -36,8 +36,8 @@ def __init__( draw_sample_splitting=True, ): self._dml_data = obj_dml_data - self._is_cluster_data = isinstance(obj_dml_data, DoubleMLClusterData) self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data self._all_treatment_levels = np.unique(self._dml_data.d) @@ -88,11 +88,13 @@ def __init__( # perform sample splitting self._smpls = None + self._n_obs_sample_splitting = self._dml_data.n_obs + 
self._strata = self._dml_data.d if draw_sample_splitting: self.draw_sample_splitting() # initialize all models if splits are known - self._modellist = self._initialize_models() + self._initialize_dml_model() def __str__(self): class_name = self.__class__.__name__ @@ -625,80 +627,8 @@ def sensitivity_benchmark(self, benchmarking_set, fit_args=None): df_benchmark = pd.DataFrame(benchmark_dict, index=self.treatment_levels) return df_benchmark - def draw_sample_splitting(self): - """ - Draw sample splitting for DoubleML models. - - The samples are drawn according to the attributes - ``n_folds`` and ``n_rep``. - - Returns - ------- - self : object - """ - obj_dml_resampling = DoubleMLResampling( - n_folds=self.n_folds, n_rep=self.n_rep, n_obs=self._dml_data.n_obs, stratify=self._dml_data.d - ) - self._smpls = obj_dml_resampling.split_samples() - - return self - - def set_sample_splitting(self, all_smpls, all_smpls_cluster=None): - """ - Set the sample splitting for DoubleML models. - - The attributes ``n_folds`` and ``n_rep`` are derived from the provided partition. - - Parameters - ---------- - all_smpls : list or tuple - If nested list of lists of tuples: - The outer list needs to provide an entry per repeated sample splitting (length of list is set as - ``n_rep``). - The inner list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as - ``n_folds``). test_ind must form a partition for each inner list. - If list of tuples: - The list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as - ``n_folds``). test_ind must form a partition. ``n_rep=1`` is always set. - If tuple: - Must be a tuple with two elements train_ind and test_ind. Only viable option is to set - train_ind and test_ind to np.arange(n_obs), which corresponds to no sample splitting. - ``n_folds=1`` and ``n_rep=1`` is always set. 
- - Returns - ------- - self : object - - Examples - -------- - >>> import numpy as np - >>> import doubleml as dml - >>> from doubleml.datasets import make_plr_CCDDHNR2018 - >>> from sklearn.ensemble import RandomForestRegressor - >>> from sklearn.base import clone - >>> np.random.seed(3141) - >>> learner = RandomForestRegressor(max_depth=2, n_estimators=10) - >>> ml_g = learner - >>> ml_m = learner - >>> obj_dml_data = make_plr_CCDDHNR2018(n_obs=10, alpha=0.5) - >>> dml_plr_obj = dml.DoubleMLPLR(obj_dml_data, ml_g, ml_m) - >>> # sample splitting with two folds and cross-fitting - >>> smpls = [([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]), - >>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])] - >>> dml_plr_obj.set_sample_splitting(smpls) - >>> # sample splitting with two folds and repeated cross-fitting with n_rep = 2 - >>> smpls = [[([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]), - >>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])], - >>> [([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]), - >>> ([1, 3, 5, 7, 9], [0, 2, 4, 6, 8])]] - >>> dml_plr_obj.set_sample_splitting(smpls) - """ - self._smpls, self._smpls_cluster, self._n_rep, self._n_folds = _check_sample_splitting( - all_smpls, all_smpls_cluster, self._dml_data, self._is_cluster_data - ) - + def _initialize_dml_model(self): self._modellist = self._initialize_models() - return self def causal_contrast(self, reference_levels): @@ -824,7 +754,7 @@ def _check_treatment_levels(self, treatment_levels): def _check_data(self, obj_dml_data): if not isinstance(obj_dml_data, DoubleMLData): - raise TypeError("The data must be of DoubleMLData or DoubleMLClusterData type.") + raise TypeError("The data must be of DoubleMLData type.") if obj_dml_data.z is not None: raise ValueError("The data must not contain instrumental variables.") return diff --git a/doubleml/irm/cvar.py b/doubleml/irm/cvar.py index 29d78f15..dd6e4737 100644 --- a/doubleml/irm/cvar.py +++ b/doubleml/irm/cvar.py @@ -82,7 +82,7 @@ class DoubleMLCVAR(LinearScoreMixin, DoubleML): -------- >>> import numpy as np >>> 
import doubleml as dml - >>> from doubleml.datasets import make_irm_data + >>> from doubleml.irm.datasets import make_irm_data >>> from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor >>> np.random.seed(3141) >>> ml_g = RandomForestRegressor(n_estimators=100, max_features=20, max_depth=10, min_samples_leaf=2) @@ -117,6 +117,7 @@ def __init__( self._normalize_ipw = normalize_ipw self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data valid_score = ["CVaR"] _check_score(self.score, valid_score, allow_callable=False) _check_quantile(self.quantile) diff --git a/doubleml/irm/datasets/__init__.py b/doubleml/irm/datasets/__init__.py new file mode 100644 index 00000000..c1525eea --- /dev/null +++ b/doubleml/irm/datasets/__init__.py @@ -0,0 +1,19 @@ +""" +The :mod:`doubleml.irm.datasets` module implements data generating processes for interactive regression models. +""" + +from .dgp_confounded_irm_data import make_confounded_irm_data +from .dgp_heterogeneous_data import make_heterogeneous_data +from .dgp_iivm_data import make_iivm_data +from .dgp_irm_data import make_irm_data +from .dgp_irm_data_discrete_treatments import make_irm_data_discrete_treatments +from .dgp_ssm_data import make_ssm_data + +__all__ = [ + "make_confounded_irm_data", + "make_heterogeneous_data", + "make_iivm_data", + "make_irm_data", + "make_irm_data_discrete_treatments", + "make_ssm_data", +] diff --git a/doubleml/irm/datasets/dgp_confounded_irm_data.py b/doubleml/irm/datasets/dgp_confounded_irm_data.py new file mode 100644 index 00000000..392f18a0 --- /dev/null +++ b/doubleml/irm/datasets/dgp_confounded_irm_data.py @@ -0,0 +1,233 @@ +import warnings + +import numpy as np +from scipy.linalg import toeplitz + + +def make_confounded_irm_data(n_obs=500, theta=0.0, gamma_a=0.127, beta_a=0.58, linear=False, **kwargs): + """ + Generates counfounded data from an interactive regression model. 
+ + The data generating process is defined as follows (inspired by the Monte Carlo simulation used + in Sant'Anna and Zhao (2020)). + + Let :math:`X= (X_1, X_2, X_3, X_4, X_5)^T \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` corresponds + to the identity matrix. + Further, define :math:`Z_j = (\\tilde{Z_j} - \\mathbb{E}[\\tilde{Z}_j]) / \\sqrt{\\text{Var}(\\tilde{Z}_j)}`, + where + + .. math:: + + \\tilde{Z}_1 &= \\exp(0.5 \\cdot X_1) + + \\tilde{Z}_2 &= 10 + X_2/(1 + \\exp(X_1)) + + \\tilde{Z}_3 &= (0.6 + X_1 \\cdot X_3 / 25)^3 + + \\tilde{Z}_4 &= (20 + X_2 + X_4)^2 + + \\tilde{Z}_5 &= X_5. + + Additionally, generate a confounder :math:`A \\sim \\mathcal{U}[-1, 1]`. + At first, define the propensity score as + + .. math:: + + m(X, A) = P(D=1|X,A) = p(Z) + \\gamma_A \\cdot A + + where + + .. math:: + + p(Z) &= \\frac{\\exp(f_{ps}(Z))}{1 + \\exp(f_{ps}(Z))}, + + f_{ps}(Z) &= 0.75 \\cdot (-Z_1 + 0.1 \\cdot Z_2 -0.25 \\cdot Z_3 - 0.1 \\cdot Z_4). + + and generate the treatment :math:`D = 1\\{m(X, A) \\ge U\\}` with :math:`U \\sim \\mathcal{U}[0, 1]`. + Since :math:`A` is independent of :math:`X`, the short form of the propensity score is given as + + .. math:: + + P(D=1|X) = p(Z). + + Further, generate the outcome of interest :math:`Y` as + + .. math:: + + Y &= \\theta \\cdot D (Z_5 + 1) + g(Z) + \\beta_A \\cdot A + \\varepsilon + + g(Z) &= 2.5 + 0.74 \\cdot Z_1 + 0.25 \\cdot Z_2 + 0.137 \\cdot (Z_3 + Z_4) + + where :math:`\\varepsilon \\sim \\mathcal{N}(0,5)`. + This implies an average treatment effect of :math:`\\theta`. Additionally, the long and short forms of + the conditional expectation take the following forms + + .. math:: + + \\mathbb{E}[Y|D, X, A] &= \\theta \\cdot D (Z_5 + 1) + g(Z) + \\beta_A \\cdot A + + \\mathbb{E}[Y|D, X] &= (\\theta + \\beta_A \\frac{\\mathrm{Cov}(A, D(Z_5 + 1))}{\\mathrm{Var}(D(Z_5 + 1))}) + \\cdot D (Z_5 + 1) + g(Z). 
+ + Consequently, the strength of confounding is determined via :math:`\\gamma_A` and :math:`\\beta_A`, which can be + set via the parameters ``gamma_a`` and ``beta_a``. + + The observed data is given as :math:`W = (Y, D, Z)`. + Further, orcale values of the confounder :math:`A`, the transformed covariated :math:`Z`, + the potential outcomes of :math:`Y`, the long and short forms of the main regression and the propensity score and + in sample versions of the confounding parameters :math:`cf_d` and :math:`cf_y` (for ATE and ATTE) + are returned in a dictionary. + + Parameters + ---------- + n_obs : int + The number of observations to simulate. + Default is ``500``. + theta : float or int + Average treatment effect. + Default is ``0.0``. + gamma_a : float + Coefficient of the unobserved confounder in the propensity score. + Default is ``0.127``. + beta_a : float + Coefficient of the unobserved confounder in the outcome regression. + Default is ``0.58``. + linear : bool + If ``True``, the Z will be set to X, such that the underlying (short) models are linear/logistic. + Default is ``False``. + + Returns + ------- + res_dict : dictionary + Dictionary with entries ``x``, ``y``, ``d`` and ``oracle_values``. + + References + ---------- + Sant'Anna, P. H. and Zhao, J. (2020), + Doubly robust difference-in-differences estimators. Journal of Econometrics, 219(1), 101-122. + doi:`10.1016/j.jeconom.2020.06.003 `_. 
+ """ + c = 0.0 # the confounding strength is only valid for c=0 + xi = 0.75 + dim_x = kwargs.get("dim_x", 5) + trimming_threshold = kwargs.get("trimming_threshold", 0.01) + var_eps_y = kwargs.get("var_eps_y", 1.0) + + # Specification of main regression function + def f_reg(w): + res = 2.5 + 0.74 * w[:, 0] + 0.25 * w[:, 1] + 0.137 * (w[:, 2] + w[:, 3]) + return res + + # Specification of prop score function + def f_ps(w, xi): + res = xi * (-w[:, 0] + 0.1 * w[:, 1] - 0.25 * w[:, 2] - 0.1 * w[:, 3]) + return res + + # observed covariates + cov_mat = toeplitz([np.power(c, k) for k in range(dim_x)]) + x = np.random.multivariate_normal( + np.zeros(dim_x), + cov_mat, + size=[ + n_obs, + ], + ) + z_tilde_1 = np.exp(0.5 * x[:, 0]) + z_tilde_2 = 10 + x[:, 1] / (1 + np.exp(x[:, 0])) + z_tilde_3 = (0.6 + x[:, 0] * x[:, 2] / 25) ** 3 + z_tilde_4 = (20 + x[:, 1] + x[:, 3]) ** 2 + z_tilde_5 = x[:, 4] + z_tilde = np.column_stack((z_tilde_1, z_tilde_2, z_tilde_3, z_tilde_4, z_tilde_5)) + z = (z_tilde - np.mean(z_tilde, axis=0)) / np.std(z_tilde, axis=0) + # error terms and unobserved confounder + eps_y = np.random.normal(loc=0, scale=np.sqrt(var_eps_y), size=n_obs) + # unobserved confounder + a_bounds = (-1, 1) + a = np.random.uniform(low=a_bounds[0], high=a_bounds[1], size=n_obs) + var_a = np.square(a_bounds[1] - a_bounds[0]) / 12 + + # Choose the features used in the models + if linear: + features_ps = x + features_reg = x + else: + features_ps = z + features_reg = z + + p = np.exp(f_ps(features_ps, xi)) / (1 + np.exp(f_ps(features_ps, xi))) + # compute short and long form of propensity score + m_long = p + gamma_a * a + m_short = p + # check propensity score bounds + if np.any(m_long < trimming_threshold) or np.any(m_long > 1.0 - trimming_threshold): + m_long = np.clip(m_long, trimming_threshold, 1.0 - trimming_threshold) + m_short = np.clip(m_short, trimming_threshold, 1.0 - trimming_threshold) + warnings.warn( + f"Propensity score is close to 0 or 1. 
" + f"Trimming is at {trimming_threshold} and {1.0 - trimming_threshold} is applied" + ) + # generate treatment based on long form + u = np.random.uniform(low=0, high=1, size=n_obs) + d = 1.0 * (m_long >= u) + # add treatment heterogeneity + d1x = z[:, 4] + 1 + var_dx = np.var(d * (d1x)) + cov_adx = gamma_a * var_a + # Outcome regression + g_partial_reg = f_reg(features_reg) + # short model + g_short_d0 = g_partial_reg + g_short_d1 = (theta + beta_a * cov_adx / var_dx) * d1x + g_partial_reg + g_short = d * g_short_d1 + (1.0 - d) * g_short_d0 + # long model + g_long_d0 = g_partial_reg + beta_a * a + g_long_d1 = theta * d1x + g_partial_reg + beta_a * a + g_long = d * g_long_d1 + (1.0 - d) * g_long_d0 + # Potential outcomes + y_0 = g_long_d0 + eps_y + y_1 = g_long_d1 + eps_y + # Realized outcome + y = d * y_1 + (1.0 - d) * y_0 + # In-sample values for confounding strength + explained_residual_variance = np.square(g_long - g_short) + residual_variance = np.square(y - g_short) + cf_y = np.mean(explained_residual_variance) / np.mean(residual_variance) + # compute the Riesz representation + treated_weight = d / np.mean(d) + untreated_weight = (1.0 - d) / np.mean(d) + # Odds ratios + propensity_ratio_long = m_long / (1.0 - m_long) + rr_long_ate = d / m_long - (1.0 - d) / (1.0 - m_long) + rr_long_atte = treated_weight - np.multiply(untreated_weight, propensity_ratio_long) + propensity_ratio_short = m_short / (1.0 - m_short) + rr_short_ate = d / m_short - (1.0 - d) / (1.0 - m_short) + rr_short_atte = treated_weight - np.multiply(untreated_weight, propensity_ratio_short) + cf_d_ate = (np.mean(1 / (m_long * (1 - m_long))) - np.mean(1 / (m_short * (1 - m_short)))) / np.mean( + 1 / (m_long * (1 - m_long)) + ) + cf_d_atte = (np.mean(propensity_ratio_long) - np.mean(propensity_ratio_short)) / np.mean(propensity_ratio_long) + if (beta_a == 0) | (gamma_a == 0): + rho_ate = 0.0 + rho_atte = 0.0 + else: + rho_ate = np.corrcoef((g_long - g_short), (rr_long_ate - rr_short_ate))[0, 1] + 
rho_atte = np.corrcoef((g_long - g_short), (rr_long_atte - rr_short_atte))[0, 1] + oracle_values = { + "g_long": g_long, + "g_short": g_short, + "m_long": m_long, + "m_short": m_short, + "gamma_a": gamma_a, + "beta_a": beta_a, + "a": a, + "y_0": y_0, + "y_1": y_1, + "z": z, + "cf_y": cf_y, + "cf_d_ate": cf_d_ate, + "cf_d_atte": cf_d_atte, + "rho_ate": rho_ate, + "rho_atte": rho_atte, + } + res_dict = {"x": x, "y": y, "d": d, "oracle_values": oracle_values} + return res_dict diff --git a/doubleml/irm/datasets/dgp_heterogeneous_data.py b/doubleml/irm/datasets/dgp_heterogeneous_data.py new file mode 100644 index 00000000..0f1a1b15 --- /dev/null +++ b/doubleml/irm/datasets/dgp_heterogeneous_data.py @@ -0,0 +1,114 @@ +import numpy as np +import pandas as pd + + +def make_heterogeneous_data(n_obs=200, p=30, support_size=5, n_x=1, binary_treatment=False): + """ + Creates a simple synthetic example for heterogeneous treatment effects. + The data generating process is based on the Monte Carlo simulation from Oprescu et al. (2019). + + The data is generated as + + .. math:: + + Y_i & = \\theta_0(X_i)D_i + \\langle X_i,\\gamma_0\\rangle + \\epsilon_i + + D_i & = \\langle X_i,\\beta_0\\rangle + \\eta_i, + + where :math:`X_i\\sim\\mathcal{U}[0,1]^{p}` and :math:`\\epsilon_i,\\eta_i + \\sim\\mathcal{U}[-1,1]`. + If the treatment is set to be binary, the treatment is generated as + + .. math:: + D_i = 1\\{\\langle X_i,\\beta_0\\rangle \\ge \\eta_i\\}. + + The coefficient vectors :math:`\\gamma_0` and :math:`\\beta_0` both have small random (identical) support + which values are drawn independently from :math:`\\mathcal{U}[0,1]` and :math:`\\mathcal{U}[0,0.3]`. + Further, :math:`\\theta_0(x)` defines the conditional treatment effect, which is defined differently depending + on the dimension of :math:`x`. + + If the heterogeneity is univariate the conditional treatment effect takes the following form + + .. 
math:: + \\theta_0(x) = \\exp(2x_0) + 3\\sin(4x_0), + + whereas for the two-dimensional case the conditional treatment effect is defined as + + .. math:: + \\theta_0(x) = \\exp(2x_0) + 3\\sin(4x_1). + + Parameters + ---------- + n_obs : int + Number of observations to simulate. + Default is ``200``. + + p : int + Dimension of covariates. + Default is ``30``. + + support_size : int + Number of relevant (confounding) covariates. + Default is ``5``. + + n_x : int + Dimension of the heterogeneity. Can be either ``1`` or ``2``. + Default is ``1``. + + binary_treatment : bool + Indicates whether the treatment is binary. + Default is ``False``. + + Returns + ------- + res_dict : dictionary + Dictionary with entries ``data``, ``effects``, ``treatment_effect``. + + """ + # simple input checks + assert n_x in [1, 2], "n_x must be either 1 or 2." + assert support_size <= p, "support_size must be smaller than p." + assert isinstance(binary_treatment, bool), "binary_treatment must be a boolean." + + # define treatment effects + if n_x == 1: + + def treatment_effect(x): + return np.exp(2 * x[:, 0]) + 3 * np.sin(4 * x[:, 0]) + + else: + assert n_x == 2 + + # redefine treatment effect + def treatment_effect(x): + return np.exp(2 * x[:, 0]) + 3 * np.sin(4 * x[:, 1]) + + # Outcome support and coefficients + support_y = np.random.choice(np.arange(p), size=support_size, replace=False) + coefs_y = np.random.uniform(0, 1, size=support_size) + # treatment support and coefficients + support_d = support_y + coefs_d = np.random.uniform(0, 0.3, size=support_size) + + # noise + epsilon = np.random.uniform(-1, 1, size=n_obs) + eta = np.random.uniform(-1, 1, size=n_obs) + + # Generate controls, covariates, treatments and outcomes + x = np.random.uniform(0, 1, size=(n_obs, p)) + # Heterogeneous treatment effects + te = treatment_effect(x) + if binary_treatment: + d = 1.0 * (np.dot(x[:, support_d], coefs_d) >= eta) + else: + d = np.dot(x[:, support_d], coefs_d) + eta + y = te * d + np.dot(x[:, 
support_y], coefs_y) + epsilon + + # Now we build the dataset + y_df = pd.DataFrame({"y": y}) + d_df = pd.DataFrame({"d": d}) + x_df = pd.DataFrame(data=x, index=np.arange(x.shape[0]), columns=[f"X_{i}" for i in range(x.shape[1])]) + + data = pd.concat([y_df, d_df, x_df], axis=1) + res_dict = {"data": data, "effects": te, "treatment_effect": treatment_effect} + return res_dict diff --git a/doubleml/irm/datasets/dgp_iivm_data.py b/doubleml/irm/datasets/dgp_iivm_data.py new file mode 100644 index 00000000..e8c1130f --- /dev/null +++ b/doubleml/irm/datasets/dgp_iivm_data.py @@ -0,0 +1,102 @@ +import numpy as np +import pandas as pd +from scipy.linalg import toeplitz + +from doubleml.data import DoubleMLData +from doubleml.utils._aliases import _get_array_alias, _get_data_frame_alias, _get_dml_data_alias + +_array_alias = _get_array_alias() +_data_frame_alias = _get_data_frame_alias() +_dml_data_alias = _get_dml_data_alias() + + +def make_iivm_data(n_obs=500, dim_x=20, theta=1.0, alpha_x=0.2, return_type="DoubleMLData"): + """ + Generates data from a interactive IV regression (IIVM) model. + The data generating process is defined as + + .. math:: + + d_i &= 1\\left\\lbrace \\alpha_x Z + v_i > 0 \\right\\rbrace, + + y_i &= \\theta d_i + x_i' \\beta + u_i, + + with :math:`Z \\sim \\text{Bernoulli}(0.5)` and + + .. math:: + + \\left(\\begin{matrix} u_i \\\\ v_i \\end{matrix} \\right) \\sim + \\mathcal{N}\\left(0, \\left(\\begin{matrix} 1 & 0.3 \\\\ 0.3 & 1 \\end{matrix} \\right) \\right). + + The covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` is a matrix with entries + :math:`\\Sigma_{kj} = 0.5^{|j-k|}` and :math:`\\beta` is a `dim_x`-vector with entries + :math:`\\beta_j=\\frac{1}{j^2}`. + + The data generating process is inspired by a process used in the simulation experiment of Farbmacher, Gruber and + Klaassen (2020). + + Parameters + ---------- + n_obs : + The number of observations to simulate. + dim_x : + The number of covariates. 
+ theta : + The value of the causal parameter. + alpha_x : + The value of the parameter :math:`\\alpha_x`. + return_type : + If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. + + If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. + + If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d, z)``. + + References + ---------- + Farbmacher, H., Guber, R. and Klaaßen, S. (2020). Instrument Validity Tests with Causal Forests. MEA Discussion + Paper No. 13-2020. Available at SSRN: http://dx.doi.org/10.2139/ssrn.3619201. + """ + # inspired by https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3619201 + xx = np.random.multivariate_normal( + np.zeros(2), + np.array([[1.0, 0.3], [0.3, 1.0]]), + size=[ + n_obs, + ], + ) + u = xx[:, 0] + v = xx[:, 1] + + cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)]) + x = np.random.multivariate_normal( + np.zeros(dim_x), + cov_mat, + size=[ + n_obs, + ], + ) + + beta = [1 / (k**2) for k in range(1, dim_x + 1)] + + z = np.random.binomial( + p=0.5, + n=1, + size=[ + n_obs, + ], + ) + d = 1.0 * (alpha_x * z + v > 0) + y = d * theta + np.dot(x, beta) + u + + if return_type in _array_alias: + return x, y, d, z + elif return_type in _data_frame_alias + _dml_data_alias: + x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] + data = pd.DataFrame(np.column_stack((x, y, d, z)), columns=x_cols + ["y", "d", "z"]) + if return_type in _data_frame_alias: + return data + else: + return DoubleMLData(data, "y", "d", x_cols, "z") + else: + raise ValueError("Invalid return_type.") diff --git a/doubleml/irm/datasets/dgp_irm_data.py b/doubleml/irm/datasets/dgp_irm_data.py new file mode 100644 index 00000000..973902ec --- /dev/null +++ b/doubleml/irm/datasets/dgp_irm_data.py @@ -0,0 +1,103 @@ +import numpy as np +import pandas as pd +from scipy.linalg import toeplitz + +from doubleml.data import DoubleMLData +from doubleml.utils._aliases 
def make_irm_data(n_obs=500, dim_x=20, theta=0, R2_d=0.5, R2_y=0.5, return_type="DoubleMLData"):
    """
    Generates data from an interactive regression (IRM) model.

    The data generating process is defined as

    .. math::

        d_i &= 1\\left\\lbrace \\frac{\\exp(c_d x_i' \\beta)}{1+\\exp(c_d x_i' \\beta)} > v_i \\right\\rbrace, & &v_i
        \\sim \\mathcal{U}(0,1),

        y_i &= \\theta d_i + c_y x_i' \\beta d_i + \\zeta_i, & &\\zeta_i \\sim \\mathcal{N}(0,1),

    with covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` is a matrix with entries
    :math:`\\Sigma_{kj} = 0.5^{|j-k|}`.
    :math:`\\beta` is a `dim_x`-vector with entries :math:`\\beta_j=\\frac{1}{j^2}` and the constants :math:`c_y` and
    :math:`c_d` are given by

    .. math::

        c_y = \\sqrt{\\frac{R_y^2}{(1-R_y^2) \\beta' \\Sigma \\beta}}, \\qquad c_d =
        \\sqrt{\\frac{(\\pi^2 /3) R_d^2}{(1-R_d^2) \\beta' \\Sigma \\beta}}.

    The data generating process is inspired by a process used in the simulation experiment (see Appendix P) of Belloni
    et al. (2017).

    Parameters
    ----------
    n_obs : int
        The number of observations to simulate.
    dim_x : int
        The number of covariates.
    theta : float
        The value of the causal parameter.
    R2_d : float
        The value of the parameter :math:`R_d^2`.
    R2_y : float
        The value of the parameter :math:`R_y^2`.
    return_type : str or type
        If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object.

        If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``.

        If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d)``.

    References
    ----------
    Belloni, A., Chernozhukov, V., Fernández‐Val, I. and Hansen, C. (2017). Program Evaluation and Causal Inference With
    High‐Dimensional Data. Econometrica, 85: 233-298.
    """
    # inspired by https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA12723, see supplement
    v = np.random.uniform(size=n_obs)
    zeta = np.random.standard_normal(size=n_obs)

    # covariates with Toeplitz covariance Sigma_{kj} = 0.5^{|j-k|}
    cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)])
    x = np.random.multivariate_normal(np.zeros(dim_x), cov_mat, size=n_obs)

    beta = [1 / (k**2) for k in range(1, dim_x + 1)]
    # scaling constants chosen to hit the targeted (pseudo) R^2 values
    b_sigma_b = np.dot(np.dot(cov_mat, beta), beta)
    c_y = np.sqrt(R2_y / ((1 - R2_y) * b_sigma_b))
    c_d = np.sqrt(np.pi**2 / 3.0 * R2_d / ((1 - R2_d) * b_sigma_b))

    # logistic propensity in x; treatment indicator via uniform draws v
    xx = np.exp(np.dot(x, np.multiply(beta, c_d)))
    d = 1.0 * ((xx / (1 + xx)) > v)

    y = d * theta + d * np.dot(x, np.multiply(beta, c_y)) + zeta

    if return_type in _array_alias:
        return x, y, d
    elif return_type in _data_frame_alias + _dml_data_alias:
        x_cols = [f"X{i + 1}" for i in np.arange(dim_x)]
        data = pd.DataFrame(np.column_stack((x, y, d)), columns=x_cols + ["y", "d"])
        if return_type in _data_frame_alias:
            return data
        else:
            return DoubleMLData(data, "y", "d", x_cols)
    else:
        raise ValueError("Invalid return_type.")


def make_irm_data_discrete_treatments(n_obs=200, n_levels=3, linear=False, random_state=None, **kwargs):
    """
    Generates data from an interactive regression (IRM) model with multiple treatment levels (based on an
    underlying continuous treatment).

    The data generating process is defined as follows (similar to the Monte Carlo simulation used
    in Sant'Anna and Zhao (2020)).

    Let :math:`X= (X_1, X_2, X_3, X_4, X_5)^T \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` corresponds
    to the identity matrix (for the default ``c=0``).
    Further, define :math:`Z_j = (\\tilde{Z_j} - \\mathbb{E}[\\tilde{Z}_j]) / \\sqrt{\\text{Var}(\\tilde{Z}_j)}`,
    where

    .. math::

        \\tilde{Z}_1 &= \\exp(0.5 \\cdot X_1)

        \\tilde{Z}_2 &= 10 + X_2/(1 + \\exp(X_1))

        \\tilde{Z}_3 &= (0.6 + X_1 \\cdot X_3 / 25)^3

        \\tilde{Z}_4 &= (20 + X_2 + X_4)^2

        \\tilde{Z}_5 &= X_5.

    A continuous treatment :math:`D_{\\text{cont}}` is generated as

    .. math::

        D_{\\text{cont}} = \\xi (-Z_1 + 0.5 Z_2 - 0.25 Z_3 - 0.1 Z_4) + \\varepsilon_D,

    where :math:`\\varepsilon_D \\sim \\mathcal{N}(0,1)` and :math:`\\xi=0.3`. The corresponding treatment
    effect is defined as

    .. math::

        \\theta (d) = \\frac{15}{1 + \\exp(-d - 1.2 \\cos(d))} - 2.

    Based on the continuous treatment, a discrete treatment :math:`D` is generated with a baseline level of
    :math:`D=0` and additional levels based on the quantiles of :math:`D_{\\text{cont}}`. The number of levels
    is defined by :math:`n_{\\text{levels}}`. Each level is chosen to have the same probability of being selected.

    The potential outcomes are defined as

    .. math::

        Y(0) &= 210 + 27.4 Z_1 + 13.7 (Z_2 + Z_3 + Z_4) + \\varepsilon_Y

        Y(1) &= \\theta (D_{\\text{cont}}) 1\\{D_{\\text{cont}} > 0\\} + Y(0),

    where :math:`\\varepsilon_Y \\sim \\mathcal{N}(0,5)`. If ``linear=True``, the regression and treatment
    indices use the raw covariates :math:`X` instead of the transformed :math:`Z`. Further, the observed
    outcome is defined as

    .. math::

        Y = Y(1) 1\\{D > 0\\} + Y(0) 1\\{D = 0\\}.

    The data is returned as a dictionary with the entries ``x``, ``y``, ``d`` and ``oracle_values``.

    Parameters
    ----------
    n_obs : int
        The number of observations to simulate.
        Default is ``200``.

    n_levels : int
        The number of treatment levels (at least ``2``).
        Default is ``3``.

    linear : bool
        Indicates whether the true underlying regression is linear.
        Default is ``False``.

    random_state : int or None
        Random seed for reproducibility; if ``None``, the global random state is used unchanged.
        Default is ``None``.

    **kwargs
        Optional tuning parameters: ``xi`` (treatment index scale, default ``0.3``),
        ``c`` (Toeplitz covariance decay of the covariates, default ``0.0``) and
        ``dim_x`` (number of covariates, default ``5``).

    Returns
    -------
    res_dict : dictionary
        Dictionary with entries ``x``, ``y``, ``d`` and ``oracle_values``.
        The oracle values contain the continuous treatment, the level bounds, the potential level, ITE
        and the potential outcome without treatment.

    """
    if random_state is not None:
        np.random.seed(random_state)
    xi = kwargs.get("xi", 0.3)
    c = kwargs.get("c", 0.0)
    dim_x = kwargs.get("dim_x", 5)

    if not isinstance(n_levels, int):
        raise ValueError("n_levels must be an integer.")
    if n_levels < 2:
        raise ValueError("n_levels must be at least 2.")

    # observed covariates with Toeplitz covariance (identity for the default c=0)
    cov_mat = toeplitz([np.power(c, k) for k in range(dim_x)])
    x = np.random.multivariate_normal(np.zeros(dim_x), cov_mat, size=n_obs)

    def f_reg(w):
        # baseline outcome regression Y(0) index
        return 210 + 27.4 * w[:, 0] + 13.7 * (w[:, 1] + w[:, 2] + w[:, 3])

    def f_treatment(w, xi):
        # index of the underlying continuous treatment
        return xi * (-w[:, 0] + 0.5 * w[:, 1] - 0.25 * w[:, 2] - 0.1 * w[:, 3])

    def treatment_effect(d, scale=15):
        # theta(d) = scale / (1 + exp(-d - 1.2 cos(d))) - 2
        return scale * (1 / (1 + np.exp(-d - 1.2 * np.cos(d)))) - 2

    # nonlinear transformations of the covariates, standardized to mean 0 / variance 1
    z_tilde_1 = np.exp(0.5 * x[:, 0])
    z_tilde_2 = 10 + x[:, 1] / (1 + np.exp(x[:, 0]))
    z_tilde_3 = (0.6 + x[:, 0] * x[:, 2] / 25) ** 3
    z_tilde_4 = (20 + x[:, 1] + x[:, 3]) ** 2

    z_tilde = np.column_stack((z_tilde_1, z_tilde_2, z_tilde_3, z_tilde_4, x[:, 4:]))
    z = (z_tilde - np.mean(z_tilde, axis=0)) / np.std(z_tilde, axis=0)

    # error terms
    var_eps_y = 5
    eps_y = np.random.normal(loc=0, scale=np.sqrt(var_eps_y), size=n_obs)
    var_eps_d = 1
    eps_d = np.random.normal(loc=0, scale=np.sqrt(var_eps_d), size=n_obs)

    # linear DGP uses the raw covariates, nonlinear DGP the transformed ones
    if linear:
        g = f_reg(x)
        m = f_treatment(x, xi)
    else:
        g = f_reg(z)
        m = f_treatment(z, xi)

    cont_d = m + eps_d
    # equal-probability treatment levels from the quantiles of the continuous treatment
    level_bounds = np.quantile(cont_d, q=np.linspace(0, 1, n_levels + 1))
    potential_level = sum([1.0 * (cont_d >= bound) for bound in level_bounds[1:-1]]) + 1
    # with probability 1/n_levels an observation stays at the control level d=0
    eta = np.random.uniform(0, 1, size=n_obs)
    d = 1.0 * (eta >= 1 / n_levels) * potential_level

    ite = treatment_effect(cont_d)
    y0 = g + eps_y
    # only treated for d > 0 compared to the baseline
    y = ite * (d > 0) + y0

    oracle_values = {
        "cont_d": cont_d,
        "level_bounds": level_bounds,
        "potential_level": potential_level,
        "ite": ite,
        "y0": y0,
    }

    res_dict = {"x": x, "y": y, "d": d, "oracle_values": oracle_values}

    return res_dict


def make_ssm_data(n_obs=8000, dim_x=100, theta=1, mar=True, return_type="DoubleMLSSMData"):
    """
    Generates data from a sample selection model (SSM).

    The data generating process is defined as

    .. math::

        y_i &= \\theta d_i + x_i' \\beta d_i + u_i,

        s_i &= 1\\left\\lbrace d_i + \\gamma z_i + x_i' \\beta + v_i > 0 \\right\\rbrace,

        d_i &= 1\\left\\lbrace x_i' \\beta + w_i > 0 \\right\\rbrace,

    with Y being observed if :math:`s_i = 1` and covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma^2_x)`, where
    :math:`\\Sigma^2_x` is a matrix with entries
    :math:`\\Sigma_{kj} = 0.5^{|j-k|}`.
    :math:`\\beta` is a `dim_x`-vector with entries :math:`\\beta_j=\\frac{0.4}{j^2}`
    :math:`z_i \\sim \\mathcal{N}(0, 1)`,
    :math:`(u_i,v_i) \\sim \\mathcal{N}(0, \\Sigma^2_{u,v})`,
    :math:`w_i \\sim \\mathcal{N}(0, 1)`.


    The data generating process is inspired by a process used in the simulation study (see Appendix E) of Bia,
    Huber and Lafférs (2023).

    Parameters
    ----------
    n_obs : int
        The number of observations to simulate.
    dim_x : int
        The number of covariates.
    theta : float
        The value of the causal parameter.
    mar : bool
        Indicates whether missingness at random holds (if ``False``, selection is nonignorable with
        correlated errors and an active instrument).
    return_type : str or type
        If ``'DoubleMLSSMData'`` or ``DoubleMLSSMData``, returns a ``DoubleMLSSMData`` object.

        If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``.

        If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d, z, s)``.

    References
    ----------
    Michela Bia, Martin Huber & Lukáš Lafférs (2023) Double Machine Learning for Sample Selection Models,
    Journal of Business & Economic Statistics, DOI: 10.1080/07350015.2023.2271071
    """
    # under MAR the selection equation is independent of (u, v) correlation and the instrument is switched off
    if mar:
        sigma = np.array([[1, 0], [0, 1]])
        gamma = 0
    else:
        sigma = np.array([[1, 0.8], [0.8, 1]])
        gamma = 1

    # correlated error pair (v, u); row 0 enters selection, row 1 the outcome
    e = np.random.multivariate_normal(mean=[0, 0], cov=sigma, size=n_obs).T

    cov_mat = toeplitz([np.power(0.5, k) for k in range(dim_x)])
    x = np.random.multivariate_normal(np.zeros(dim_x), cov_mat, size=n_obs)

    beta = [0.4 / (k**2) for k in range(1, dim_x + 1)]

    d = np.where(np.dot(x, beta) + np.random.randn(n_obs) > 0, 1, 0)
    z = np.random.randn(n_obs)
    s = np.where(np.dot(x, beta) + d + gamma * z + e[0] > 0, 1, 0)

    y = np.dot(x, beta) + theta * d + e[1]
    # outcome is unobserved (set to 0) for non-selected observations
    y[s == 0] = 0

    if return_type in _array_alias:
        return x, y, d, z, s
    elif return_type in _data_frame_alias + _dml_ssm_data_alias:
        x_cols = [f"X{i + 1}" for i in np.arange(dim_x)]
        if mar:
            data = pd.DataFrame(np.column_stack((x, y, d, s)), columns=x_cols + ["y", "d", "s"])
        else:
            data = pd.DataFrame(np.column_stack((x, y, d, z, s)), columns=x_cols + ["y", "d", "z", "s"])
        if return_type in _data_frame_alias:
            return data
        else:
            if mar:
                return DoubleMLSSMData(data, "y", "d", x_cols, z_cols=None, s_col="s")
            return DoubleMLSSMData(data, "y", "d", x_cols, z_cols="z", s_col="s")
    else:
        raise ValueError("Invalid return_type.")
np >>> import doubleml as dml - >>> from doubleml.datasets import make_iivm_data + >>> from doubleml.irm.datasets import make_iivm_data >>> from sklearn.ensemble import RandomForestClassifier >>> np.random.seed(3141) >>> ml_g = RandomForestClassifier(n_estimators=100, max_features=20, max_depth=10, min_samples_leaf=2) @@ -125,6 +125,7 @@ def __init__( self._normalize_ipw = normalize_ipw self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data valid_score = ["LPQ"] _check_score(self.score, valid_score, allow_callable=False) diff --git a/doubleml/irm/pq.py b/doubleml/irm/pq.py index 7f40d27d..e515e578 100644 --- a/doubleml/irm/pq.py +++ b/doubleml/irm/pq.py @@ -90,7 +90,7 @@ class DoubleMLPQ(NonLinearScoreMixin, DoubleML): -------- >>> import numpy as np >>> import doubleml as dml - >>> from doubleml.datasets import make_irm_data + >>> from doubleml.irm.datasets import make_irm_data >>> from sklearn.ensemble import RandomForestClassifier >>> np.random.seed(3141) >>> ml_g = RandomForestClassifier(n_estimators=100, max_features=20, max_depth=10, min_samples_leaf=2) @@ -132,6 +132,7 @@ def __init__( self._normalize_ipw = normalize_ipw self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data valid_score = ["PQ"] _check_score(self.score, valid_score, allow_callable=False) diff --git a/doubleml/irm/qte.py b/doubleml/irm/qte.py index 9f617e3e..f896b078 100644 --- a/doubleml/irm/qte.py +++ b/doubleml/irm/qte.py @@ -3,18 +3,18 @@ from joblib import Parallel, delayed from sklearn.base import clone -from doubleml.data import DoubleMLClusterData, DoubleMLData +from doubleml.data import DoubleMLData from doubleml.double_ml_framework import concat +from doubleml.double_ml_sampling_mixins import SampleSplittingMixin from doubleml.irm.cvar import DoubleMLCVAR from doubleml.irm.lpq import DoubleMLLPQ from doubleml.irm.pq import DoubleMLPQ -from doubleml.utils._checks import _check_sample_splitting, _check_score, 
_check_trimming, _check_zero_one_treatment +from doubleml.utils._checks import _check_score, _check_trimming, _check_zero_one_treatment from doubleml.utils._descriptive import generate_summary from doubleml.utils._estimation import _default_kde -from doubleml.utils.resampling import DoubleMLResampling -class DoubleMLQTE: +class DoubleMLQTE(SampleSplittingMixin): """Double machine learning for quantile treatment effects Parameters @@ -72,7 +72,7 @@ class DoubleMLQTE: -------- >>> import numpy as np >>> import doubleml as dml - >>> from doubleml.datasets import make_irm_data + >>> from doubleml.irm.datasets import make_irm_data >>> from sklearn.ensemble import RandomForestClassifier >>> np.random.seed(3141) >>> ml_g = RandomForestClassifier(n_estimators=100, max_features=20, max_depth=10, min_samples_leaf=2) @@ -124,10 +124,8 @@ def __init__( _check_score(self.score, valid_scores, allow_callable=False) # check data - self._is_cluster_data = False - if isinstance(obj_dml_data, DoubleMLClusterData): - self._is_cluster_data = True self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data # initialize framework which is constructed after the fit method is called self._framework = None @@ -147,10 +145,12 @@ def __init__( # perform sample splitting self._smpls = None + self._n_obs_sample_splitting = self._dml_data.n_obs + self._strata = self._dml_data.d if draw_sample_splitting: self.draw_sample_splitting() # initialize all models - self._modellist_0, self._modellist_1 = self._initialize_models() + self._initialize_dml_model() def __str__(self): class_name = self.__class__.__name__ @@ -439,94 +439,11 @@ def bootstrap(self, method="normal", n_rep_boot=500): if self._framework is None: raise ValueError("Apply fit() before bootstrap().") self._framework.bootstrap(method=method, n_rep_boot=n_rep_boot) - - return self - - def draw_sample_splitting(self): - """ - Draw sample splitting for DoubleML models. 
- - The samples are drawn according to the attributes - ``n_folds`` and ``n_rep``. - - Returns - ------- - self : object - """ - obj_dml_resampling = DoubleMLResampling( - n_folds=self.n_folds, n_rep=self.n_rep, n_obs=self._dml_data.n_obs, stratify=self._dml_data.d - ) - self._smpls = obj_dml_resampling.split_samples() - # initialize all models - self._modellist_0, self._modellist_1 = self._initialize_models() - return self - def set_sample_splitting(self, all_smpls, all_smpls_cluster=None): - """ - Set the sample splitting for DoubleML models. - - The attributes ``n_folds`` and ``n_rep`` are derived from the provided partition. - - Parameters - ---------- - all_smpls : list or tuple - If nested list of lists of tuples: - The outer list needs to provide an entry per repeated sample splitting (length of list is set as - ``n_rep``). - The inner list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as - ``n_folds``). test_ind must form a partition for each inner list. - If list of tuples: - The list needs to provide a tuple (train_ind, test_ind) per fold (length of list is set as - ``n_folds``). test_ind must form a partition. ``n_rep=1`` is always set. - If tuple: - Must be a tuple with two elements train_ind and test_ind. Only viable option is to set - train_ind and test_ind to np.arange(n_obs), which corresponds to no sample splitting. - ``n_folds=1`` and ``n_rep=1`` is always set. - - all_smpls_cluster : list or None - Nested list or ``None``. The first level of nesting corresponds to the number of repetitions. The second level - of nesting corresponds to the number of folds. The third level of nesting contains a tuple of training and - testing lists. Both training and testing contain an array for each cluster variable, which form a partition of - the clusters. - Default is ``None``. 
- - Returns - ------- - self : object - - Examples - -------- - >>> import numpy as np - >>> import doubleml as dml - >>> from doubleml.datasets import make_plr_CCDDHNR2018 - >>> from sklearn.ensemble import RandomForestRegressor - >>> from sklearn.base import clone - >>> np.random.seed(3141) - >>> learner = RandomForestRegressor(max_depth=2, n_estimators=10) - >>> ml_g = learner - >>> ml_m = learner - >>> obj_dml_data = make_plr_CCDDHNR2018(n_obs=10, alpha=0.5) - >>> dml_plr_obj = dml.DoubleMLPLR(obj_dml_data, ml_g, ml_m) - >>> dml_plr_obj.set_sample_splitting(smpls) - >>> # sample splitting with two folds and cross-fitting - >>> smpls = [([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]), - >>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])] - >>> dml_plr_obj.set_sample_splitting(smpls) - >>> # sample splitting with two folds and repeated cross-fitting with n_rep = 2 - >>> smpls = [[([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]), - >>> ([5, 6, 7, 8, 9], [0, 1, 2, 3, 4])], - >>> [([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]), - >>> ([1, 3, 5, 7, 9], [0, 2, 4, 6, 8])]] - >>> dml_plr_obj.set_sample_splitting(smpls) - """ - self._smpls, self._smpls_cluster, self._n_rep, self._n_folds = _check_sample_splitting( - all_smpls, all_smpls_cluster, self._dml_data, self._is_cluster_data - ) - + def _initialize_dml_model(self): # initialize all models self._modellist_0, self._modellist_1 = self._initialize_models() - return self def confint(self, joint=False, level=0.95): diff --git a/doubleml/irm/ssm.py b/doubleml/irm/ssm.py index e7e5d83c..56c58469 100644 --- a/doubleml/irm/ssm.py +++ b/doubleml/irm/ssm.py @@ -6,7 +6,7 @@ from sklearn.model_selection import train_test_split from sklearn.utils import check_X_y -from doubleml.data.base_data import DoubleMLData +from doubleml.data.ssm_data import DoubleMLSSMData from doubleml.double_ml import DoubleML from doubleml.double_ml_score_mixins import LinearScoreMixin from doubleml.utils._checks import _check_finite_predictions, _check_score, _check_trimming @@ -19,8 +19,8 @@ class 
DoubleMLSSM(LinearScoreMixin, DoubleML): Parameters ---------- - obj_dml_data : :class:`DoubleMLData` object - The :class:`DoubleMLData` object providing the data and specifying the variables for the causal model. + obj_dml_data : :class:`DoubleMLSSMData` object + The :class:`DoubleMLSSMData` object providing the data and specifying the variables for the causal model. ml_g : estimator implementing ``fit()`` and ``predict()`` A machine learner implementing ``fit()`` and ``predict()`` methods (e.g. @@ -66,7 +66,7 @@ class DoubleMLSSM(LinearScoreMixin, DoubleML): -------- >>> import numpy as np >>> import doubleml as dml - >>> from doubleml import DoubleMLData + >>> from doubleml import DoubleMLSSMData >>> from sklearn.linear_model import LassoCV, LogisticRegressionCV() >>> from sklearn.base import clone >>> np.random.seed(3146) @@ -82,7 +82,7 @@ class DoubleMLSSM(LinearScoreMixin, DoubleML): >>> s = np.where(np.dot(X, beta) + 0.25 * d + z + e[0] > 0, 1, 0) >>> y = np.dot(X, beta) + 0.5 * d + e[1] >>> y[s == 0] = 0 - >>> simul_data = DoubleMLData.from_arrays(X, y, d, z=None, t=s) + >>> simul_data = DoubleMLSSMData.from_arrays(X, y, d, z=None, s=s) >>> learner = LassoCV() >>> learner_class = LogisticRegressionCV() >>> ml_g_sim = clone(learner) @@ -124,6 +124,7 @@ def __init__( _check_trimming(self._trimming_rule, self._trimming_threshold) self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data _check_score(self.score, ["missing-at-random", "nonignorable"]) # for both score function stratification by d and s is viable @@ -183,9 +184,9 @@ def _initialize_ml_nuisance_params(self): self._params = {learner: {key: [None] * self.n_rep for key in self._dml_data.d_cols} for learner in valid_learner} def _check_data(self, obj_dml_data): - if not isinstance(obj_dml_data, DoubleMLData): + if not isinstance(obj_dml_data, DoubleMLSSMData): raise TypeError( - f"The data must be of DoubleMLData type. 
{str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed." + f"The data must be of DoubleMLSSMData type. {str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed." ) if obj_dml_data.z_cols is not None and self._score == "missing-at-random": warnings.warn( diff --git a/doubleml/irm/tests/conftest.py b/doubleml/irm/tests/conftest.py index 1cf1d525..0a3d4db8 100644 --- a/doubleml/irm/tests/conftest.py +++ b/doubleml/irm/tests/conftest.py @@ -4,7 +4,7 @@ from scipy.linalg import toeplitz from sklearn.datasets import make_spd_matrix -from doubleml.datasets import make_iivm_data, make_irm_data +from doubleml.irm.datasets import make_iivm_data, make_irm_data def _g(x): diff --git a/doubleml/irm/tests/test_apo.py b/doubleml/irm/tests/test_apo.py index df4ec284..7558b7c1 100644 --- a/doubleml/irm/tests/test_apo.py +++ b/doubleml/irm/tests/test_apo.py @@ -8,7 +8,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression import doubleml as dml -from doubleml.datasets import make_irm_data, make_irm_data_discrete_treatments +from doubleml.irm.datasets import make_irm_data, make_irm_data_discrete_treatments from ...tests._utils import draw_smpls from ._utils_apo_manual import boot_apo, fit_apo, fit_sensitivity_elements_apo diff --git a/doubleml/irm/tests/test_apo_exceptions.py b/doubleml/irm/tests/test_apo_exceptions.py index cfb6e93b..5991ee5e 100644 --- a/doubleml/irm/tests/test_apo_exceptions.py +++ b/doubleml/irm/tests/test_apo_exceptions.py @@ -5,7 +5,7 @@ from sklearn.linear_model import Lasso, LogisticRegression from doubleml import DoubleMLAPO, DoubleMLData -from doubleml.datasets import make_iivm_data, make_irm_data, make_irm_data_discrete_treatments +from doubleml.irm.datasets import make_iivm_data, make_irm_data, make_irm_data_discrete_treatments n = 100 data_apo = make_irm_data_discrete_treatments(n_obs=n) @@ -22,7 +22,11 @@ @pytest.mark.ci def test_apo_exception_data(): - msg = "The data must be of DoubleMLData or DoubleMLClusterData 
type." + msg = ( + r"The data must be of DoubleMLData or DoubleMLClusterData or DoubleMLDIDData or DoubleMLSSMData or " + r"DoubleMLRDDData type\. Empty DataFrame\nColumns: \[\]\nIndex: \[\] of type " + r" was passed\." + ) with pytest.raises(TypeError, match=msg): _ = DoubleMLAPO(pd.DataFrame(), ml_g, ml_m, treatment_level=0) diff --git a/doubleml/irm/tests/test_apo_external_predictions.py b/doubleml/irm/tests/test_apo_external_predictions.py index 2bbe50e8..246ef021 100644 --- a/doubleml/irm/tests/test_apo_external_predictions.py +++ b/doubleml/irm/tests/test_apo_external_predictions.py @@ -6,7 +6,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression from doubleml import DoubleMLAPO, DoubleMLData -from doubleml.datasets import make_irm_data_discrete_treatments +from doubleml.irm.datasets import make_irm_data_discrete_treatments from doubleml.utils import DMLDummyClassifier, DMLDummyRegressor from ...tests._utils import draw_smpls diff --git a/doubleml/irm/tests/test_apos.py b/doubleml/irm/tests/test_apos.py index 746cb63c..55a48ced 100644 --- a/doubleml/irm/tests/test_apos.py +++ b/doubleml/irm/tests/test_apos.py @@ -6,7 +6,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression import doubleml as dml -from doubleml.datasets import make_irm_data, make_irm_data_discrete_treatments +from doubleml.irm.datasets import make_irm_data, make_irm_data_discrete_treatments from ...tests._utils import confint_manual from ._utils_apos_manual import boot_apos, fit_apos diff --git a/doubleml/irm/tests/test_apos_classfier.py b/doubleml/irm/tests/test_apos_classfier.py index 06fdc308..f9cfc10c 100644 --- a/doubleml/irm/tests/test_apos_classfier.py +++ b/doubleml/irm/tests/test_apos_classfier.py @@ -6,7 +6,7 @@ from sklearn.linear_model import LogisticRegression import doubleml as dml -from doubleml.datasets import make_irm_data_discrete_treatments +from doubleml.irm.datasets import make_irm_data_discrete_treatments from ...tests._utils 
import confint_manual from ._utils_apos_manual import boot_apos, fit_apos diff --git a/doubleml/irm/tests/test_apos_exceptions.py b/doubleml/irm/tests/test_apos_exceptions.py index c309b7e2..93274cee 100644 --- a/doubleml/irm/tests/test_apos_exceptions.py +++ b/doubleml/irm/tests/test_apos_exceptions.py @@ -4,7 +4,7 @@ from sklearn.linear_model import Lasso, LogisticRegression from doubleml import DoubleMLAPOS, DoubleMLData -from doubleml.datasets import make_iivm_data, make_irm_data_discrete_treatments +from doubleml.irm.datasets import make_iivm_data, make_irm_data_discrete_treatments n = 100 data = make_irm_data_discrete_treatments(n_obs=n) @@ -20,7 +20,7 @@ @pytest.mark.ci def test_apos_exception_data(): - msg = "The data must be of DoubleMLData or DoubleMLClusterData type." + msg = "The data must be of DoubleMLData type." with pytest.raises(TypeError, match=msg): _ = DoubleMLAPOS(pd.DataFrame(), ml_g, ml_m, treatment_levels=0) diff --git a/doubleml/irm/tests/test_apos_external_predictions.py b/doubleml/irm/tests/test_apos_external_predictions.py index 9e97de07..ed4323ad 100644 --- a/doubleml/irm/tests/test_apos_external_predictions.py +++ b/doubleml/irm/tests/test_apos_external_predictions.py @@ -6,7 +6,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression from doubleml import DoubleMLAPOS, DoubleMLData -from doubleml.datasets import make_irm_data_discrete_treatments +from doubleml.irm.datasets import make_irm_data_discrete_treatments from doubleml.utils import DMLDummyClassifier, DMLDummyRegressor from ...tests._utils import draw_smpls diff --git a/doubleml/irm/tests/test_apos_weighted_scores.py b/doubleml/irm/tests/test_apos_weighted_scores.py index ea612dec..6d0a7f65 100644 --- a/doubleml/irm/tests/test_apos_weighted_scores.py +++ b/doubleml/irm/tests/test_apos_weighted_scores.py @@ -6,7 +6,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression import doubleml as dml -from doubleml.datasets import 
make_irm_data_discrete_treatments +from doubleml.irm.datasets import make_irm_data_discrete_treatments @pytest.fixture( diff --git a/doubleml/irm/tests/test_iivm_external_predictions.py b/doubleml/irm/tests/test_iivm_external_predictions.py index 7f4626e9..d71d2bb5 100644 --- a/doubleml/irm/tests/test_iivm_external_predictions.py +++ b/doubleml/irm/tests/test_iivm_external_predictions.py @@ -5,7 +5,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression from doubleml import DoubleMLData, DoubleMLIIVM -from doubleml.datasets import make_iivm_data +from doubleml.irm.datasets import make_iivm_data from doubleml.utils import DMLDummyClassifier, DMLDummyRegressor diff --git a/doubleml/irm/tests/test_irm.py b/doubleml/irm/tests/test_irm.py index f99f2253..856c7f59 100644 --- a/doubleml/irm/tests/test_irm.py +++ b/doubleml/irm/tests/test_irm.py @@ -8,7 +8,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression import doubleml as dml -from doubleml.datasets import make_irm_data +from doubleml.irm.datasets import make_irm_data from doubleml.utils.resampling import DoubleMLResampling from ...tests._utils import draw_smpls diff --git a/doubleml/irm/tests/test_irm_external_predictions.py b/doubleml/irm/tests/test_irm_external_predictions.py index dabf6c0e..5d0412d5 100644 --- a/doubleml/irm/tests/test_irm_external_predictions.py +++ b/doubleml/irm/tests/test_irm_external_predictions.py @@ -5,7 +5,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression from doubleml import DoubleMLData, DoubleMLIRM -from doubleml.datasets import make_irm_data +from doubleml.irm.datasets import make_irm_data from doubleml.utils import DMLDummyClassifier, DMLDummyRegressor diff --git a/doubleml/irm/tests/test_lpq_external_predictions.py b/doubleml/irm/tests/test_lpq_external_predictions.py index 66f2ece6..48cb42f5 100644 --- a/doubleml/irm/tests/test_lpq_external_predictions.py +++ b/doubleml/irm/tests/test_lpq_external_predictions.py @@ 
-5,7 +5,7 @@ from sklearn.linear_model import LogisticRegression from doubleml import DoubleMLData, DoubleMLLPQ -from doubleml.datasets import make_iivm_data +from doubleml.irm.datasets import make_iivm_data from doubleml.utils import DMLDummyClassifier from ...tests._utils import draw_smpls diff --git a/doubleml/irm/tests/test_pq_external_predictions.py b/doubleml/irm/tests/test_pq_external_predictions.py index 28f8ec66..9674c464 100644 --- a/doubleml/irm/tests/test_pq_external_predictions.py +++ b/doubleml/irm/tests/test_pq_external_predictions.py @@ -5,7 +5,7 @@ from sklearn.linear_model import LogisticRegression from doubleml import DoubleMLData, DoubleMLPQ -from doubleml.datasets import make_irm_data +from doubleml.irm.datasets import make_irm_data from doubleml.utils import DMLDummyClassifier from ...tests._utils import draw_smpls diff --git a/doubleml/irm/tests/test_qte.py b/doubleml/irm/tests/test_qte.py index 0557c85b..7fcbeec2 100644 --- a/doubleml/irm/tests/test_qte.py +++ b/doubleml/irm/tests/test_qte.py @@ -8,7 +8,7 @@ from sklearn.linear_model import LogisticRegression import doubleml as dml -from doubleml.datasets import make_irm_data +from doubleml.irm.datasets import make_irm_data from ...tests._utils import confint_manual, draw_smpls from ...utils._estimation import _default_kde diff --git a/doubleml/irm/tests/test_qte_exceptions.py b/doubleml/irm/tests/test_qte_exceptions.py index 9f94f5d4..f4e95110 100644 --- a/doubleml/irm/tests/test_qte_exceptions.py +++ b/doubleml/irm/tests/test_qte_exceptions.py @@ -6,7 +6,7 @@ from doubleml import DoubleMLData, DoubleMLQTE from doubleml.data.base_data import DoubleMLBaseData -from doubleml.datasets import make_irm_data +from doubleml.irm.datasets import make_irm_data np.random.seed(42) n = 100 diff --git a/doubleml/irm/tests/test_ssm.py b/doubleml/irm/tests/test_ssm.py index b157794b..c561d9fe 100644 --- a/doubleml/irm/tests/test_ssm.py +++ b/doubleml/irm/tests/test_ssm.py @@ -54,11 +54,11 @@ def 
dml_selection_fixture( np.random.seed(42) if score == "missing-at-random": - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d, z=None, s=s) + obj_dml_data = dml.DoubleMLSSMData.from_arrays(x, y, d, z=None, s=s) dml_sel_obj = dml.DoubleMLSSM(obj_dml_data, ml_g, ml_pi, ml_m, n_folds=n_folds, score=score) else: assert score == "nonignorable" - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d, z=z, s=s) + obj_dml_data = dml.DoubleMLSSMData.from_arrays(x, y, d, z=z, s=s) dml_sel_obj = dml.DoubleMLSSM(obj_dml_data, ml_g, ml_pi, ml_m, n_folds=n_folds, score=score) np.random.seed(42) diff --git a/doubleml/irm/tests/test_ssm_exceptions.py b/doubleml/irm/tests/test_ssm_exceptions.py index 6ff276e3..6df76908 100644 --- a/doubleml/irm/tests/test_ssm_exceptions.py +++ b/doubleml/irm/tests/test_ssm_exceptions.py @@ -6,7 +6,7 @@ from doubleml import DoubleMLSSM from doubleml.data.base_data import DoubleMLBaseData -from doubleml.datasets import make_ssm_data +from doubleml.irm.datasets import make_ssm_data np.random.seed(3141) n = 100 @@ -22,6 +22,7 @@ class DummyDataClass(DoubleMLBaseData): def __init__(self, data): DoubleMLBaseData.__init__(self, data) + self.is_cluster_data = False @property def n_coefs(self): @@ -30,11 +31,15 @@ def n_coefs(self): @pytest.mark.ci def test_ssm_exception_data(): - msg = "The data must be of DoubleMLData or DoubleMLClusterData type." + msg = ( + r"The data must be of DoubleMLData or DoubleMLClusterData or DoubleMLDIDData or DoubleMLSSMData or " + r"DoubleMLRDDData type\. Empty DataFrame\nColumns: \[\]\nIndex: \[\] of type " + r" was passed\." + ) with pytest.raises(TypeError, match=msg): _ = DoubleMLSSM(pd.DataFrame(), ml_g, ml_pi, ml_m) - msg = "The data must be of DoubleMLData type." + msg = "The data must be of DoubleMLSSMData type." 
with pytest.raises(TypeError, match=msg): _ = DoubleMLSSM(DummyDataClass(pd.DataFrame(np.zeros((100, 10)))), ml_g, ml_pi, ml_m) diff --git a/doubleml/irm/tests/test_ssm_tune.py b/doubleml/irm/tests/test_ssm_tune.py index 0fafbc13..4e48bec3 100644 --- a/doubleml/irm/tests/test_ssm_tune.py +++ b/doubleml/irm/tests/test_ssm_tune.py @@ -76,7 +76,7 @@ def dml_ssm_fixture( np.random.seed(42) if score == "missing-at-random": - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d, z=None, s=s) + obj_dml_data = dml.DoubleMLSSMData.from_arrays(x, y, d, z=None, s=s) dml_sel_obj = dml.DoubleMLSSM( obj_dml_data, ml_g, @@ -89,7 +89,7 @@ def dml_ssm_fixture( ) else: assert score == "nonignorable" - obj_dml_data = dml.DoubleMLData.from_arrays(x, y, d, z=z, s=s) + obj_dml_data = dml.DoubleMLSSMData.from_arrays(x, y, d, z=z, s=s) dml_sel_obj = dml.DoubleMLSSM( obj_dml_data, ml_g, diff --git a/doubleml/plm/datasets/__init__.py b/doubleml/plm/datasets/__init__.py new file mode 100644 index 00000000..b2bb7df0 --- /dev/null +++ b/doubleml/plm/datasets/__init__.py @@ -0,0 +1,19 @@ +""" +The :mod:`doubleml.plm.datasets` module implements data generating processes for partially linear models. 
+""" + +from ._make_pliv_data import _make_pliv_data +from .dgp_confounded_plr_data import make_confounded_plr_data +from .dgp_pliv_CHS2015 import make_pliv_CHS2015 +from .dgp_pliv_multiway_cluster_CKMS2021 import make_pliv_multiway_cluster_CKMS2021 +from .dgp_plr_CCDDHNR2018 import make_plr_CCDDHNR2018 +from .dgp_plr_turrell2018 import make_plr_turrell2018 + +__all__ = [ + "make_plr_CCDDHNR2018", + "make_plr_turrell2018", + "make_confounded_plr_data", + "make_pliv_CHS2015", + "make_pliv_multiway_cluster_CKMS2021", + "_make_pliv_data", +] diff --git a/doubleml/plm/datasets/_make_pliv_data.py b/doubleml/plm/datasets/_make_pliv_data.py new file mode 100644 index 00000000..deb7cc53 --- /dev/null +++ b/doubleml/plm/datasets/_make_pliv_data.py @@ -0,0 +1,70 @@ +""" +Helper function for partially linear IV data generation. +""" + +import numpy as np +import pandas as pd +from sklearn.datasets import make_spd_matrix + +from doubleml.data import DoubleMLData +from doubleml.utils._aliases import _get_array_alias, _get_data_frame_alias, _get_dml_data_alias + +_array_alias = _get_array_alias() +_data_frame_alias = _get_data_frame_alias() +_dml_data_alias = _get_dml_data_alias() + + +def _g(x): + return np.power(np.sin(x), 2) + + +def _m(x, nu=0.0, gamma=1.0): + return 0.5 / np.pi * (np.sinh(gamma)) / (np.cosh(gamma) - np.cos(x - nu)) + + +def _make_pliv_data(n_obs=100, dim_x=20, theta=0.5, gamma_z=0.4, return_type="DoubleMLData"): + b = [1 / k for k in range(1, dim_x + 1)] + sigma = make_spd_matrix(dim_x) + + x = np.random.multivariate_normal( + np.zeros(dim_x), + sigma, + size=[ + n_obs, + ], + ) + G = _g(np.dot(x, b)) + # instrument + z = _m(np.dot(x, b)) + np.random.standard_normal( + size=[ + n_obs, + ] + ) + # treatment + M = _m(gamma_z * z + np.dot(x, b)) + d = M + np.random.standard_normal( + size=[ + n_obs, + ] + ) + y = ( + np.dot(theta, d) + + G + + np.random.standard_normal( + size=[ + n_obs, + ] + ) + ) + + if return_type in _array_alias: + return x, y, d, z + 
elif return_type in _data_frame_alias + _dml_data_alias: + x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] + data = pd.DataFrame(np.column_stack((x, y, d, z)), columns=x_cols + ["y", "d", "z"]) + if return_type in _data_frame_alias: + return data + else: + return DoubleMLData(data, "y", "d", x_cols, "z") + else: + raise ValueError("Invalid return_type.") diff --git a/doubleml/plm/datasets/dgp_confounded_plr_data.py b/doubleml/plm/datasets/dgp_confounded_plr_data.py new file mode 100644 index 00000000..794e3db1 --- /dev/null +++ b/doubleml/plm/datasets/dgp_confounded_plr_data.py @@ -0,0 +1,171 @@ +import numpy as np +from scipy.linalg import toeplitz +from scipy.optimize import minimize_scalar + + +def make_confounded_plr_data(n_obs=500, theta=5.0, cf_y=0.04, cf_d=0.04, **kwargs): + """ + Generates counfounded data from an partially linear regression model. + + The data generating process is defined as follows (similar to the Monte Carlo simulation used + in Sant'Anna and Zhao (2020)). Let :math:`X= (X_1, X_2, X_3, X_4, X_5)^T \\sim \\mathcal{N}(0, \\Sigma)`, + where :math:`\\Sigma` is a matrix with entries + :math:`\\Sigma_{kj} = c^{|j-k|}`. The default value is :math:`c = 0`, corresponding to the identity matrix. + Further, define :math:`Z_j = (\\tilde{Z_j} - \\mathbb{E}[\\tilde{Z}_j]) / \\sqrt{\\text{Var}(\\tilde{Z}_j)}`, + where + + .. math:: + + \\tilde{Z}_1 &= \\exp(0.5 \\cdot X_1) + + \\tilde{Z}_2 &= 10 + X_2/(1 + \\exp(X_1)) + + \\tilde{Z}_3 &= (0.6 + X_1 \\cdot X_3 / 25)^3 + + \\tilde{Z}_4 &= (20 + X_2 + X_4)^2. + + Additionally, generate a confounder :math:`A \\sim \\mathcal{U}[-1, 1]`. + At first, define the treatment as + + .. math:: + + D = -Z_1 + 0.5 \\cdot Z_2 - 0.25 \\cdot Z_3 - 0.1 \\cdot Z_4 + \\gamma_A \\cdot A + \\varepsilon_D + + and with :math:`\\varepsilon \\sim \\mathcal{N}(0,1)`. + Since :math:`A` is independent of :math:`X`, the long and short form of the treatment regression are given as + + .. 
math:: + + E[D|X,A] = -Z_1 + 0.5 \\cdot Z_2 - 0.25 \\cdot Z_3 - 0.1 \\cdot Z_4 + \\gamma_A \\cdot A + + E[D|X] = -Z_1 + 0.5 \\cdot Z_2 - 0.25 \\cdot Z_3 - 0.1 \\cdot Z_4. + + Further, generate the outcome of interest :math:`Y` as + + .. math:: + + Y &= \\theta \\cdot D + g(Z) + \\beta_A \\cdot A + \\varepsilon + + g(Z) &= 210 + 27.4 \\cdot Z_1 +13.7 \\cdot (Z_2 + Z_3 + Z_4) + + where :math:`\\varepsilon \\sim \\mathcal{N}(0,5)`. + This implies an average treatment effect of :math:`\\theta`. Additionally, the long and short forms of + the conditional expectation take the following forms + + .. math:: + + \\mathbb{E}[Y|D, X, A] &= \\theta \\cdot D + g(Z) + \\beta_A \\cdot A + + \\mathbb{E}[Y|D, X] &= (\\theta + \\gamma_A\\beta_A \\frac{\\mathrm{Var}(A)}{\\mathrm{Var}(D)}) \\cdot D + g(Z). + + Consequently, the strength of confounding is determined via :math:`\\gamma_A` and :math:`\\beta_A`. + Both are chosen to obtain the desired confounding of the outcome and Riesz Representer (in sample). + + The observed data is given as :math:`W = (Y, D, X)`. + Further, orcale values of the confounder :math:`A`, the transformed covariated :math:`Z`, the effect :math:`\\theta`, + the coefficients :math:`\\gamma_a`, :math:`\\beta_a`, the long and short forms of the main regression and + the propensity score are returned in a dictionary. + + Parameters + ---------- + n_obs : int + The number of observations to simulate. + Default is ``500``. + theta : float or int + Average treatment effect. + Default is ``5.0``. + cf_y : float + Percentage of the residual variation of the outcome explained by latent/confounding variable. + Default is ``0.04``. + cf_d : float + Percentage gains in the variation of the Riesz Representer generated by latent/confounding variable. + Default is ``0.04``. + + Returns + ------- + res_dict : dictionary + Dictionary with entries ``x``, ``y``, ``d`` and ``oracle_values``. + + References + ---------- + Sant'Anna, P. H. and Zhao, J. 
(2020), + Doubly robust difference-in-differences estimators. Journal of Econometrics, 219(1), 101-122. + doi:`10.1016/j.jeconom.2020.06.003 `_. + """ + c = kwargs.get("c", 0.0) + dim_x = kwargs.get("dim_x", 4) + + # observed covariates + cov_mat = toeplitz([np.power(c, k) for k in range(dim_x)]) + x = np.random.multivariate_normal( + np.zeros(dim_x), + cov_mat, + size=[ + n_obs, + ], + ) + + z_tilde_1 = np.exp(0.5 * x[:, 0]) + z_tilde_2 = 10 + x[:, 1] / (1 + np.exp(x[:, 0])) + z_tilde_3 = (0.6 + x[:, 0] * x[:, 2] / 25) ** 3 + z_tilde_4 = (20 + x[:, 1] + x[:, 3]) ** 2 + + z_tilde = np.column_stack((z_tilde_1, z_tilde_2, z_tilde_3, z_tilde_4, x[:, 4:])) + z = (z_tilde - np.mean(z_tilde, axis=0)) / np.std(z_tilde, axis=0) + + # error terms + var_eps_y = 5 + eps_y = np.random.normal(loc=0, scale=np.sqrt(var_eps_y), size=n_obs) + var_eps_d = 1 + eps_d = np.random.normal(loc=0, scale=np.sqrt(var_eps_d), size=n_obs) + + # unobserved confounder + a_bounds = (-1, 1) + a = np.random.uniform(low=a_bounds[0], high=a_bounds[1], size=n_obs) + var_a = np.square(a_bounds[1] - a_bounds[0]) / 12 + + # get the required impact of the confounder on the propensity score + m_short = -z[:, 0] + 0.5 * z[:, 1] - 0.25 * z[:, 2] - 0.1 * z[:, 3] + + def f_m(gamma_a): + rr_long = eps_d / var_eps_d + rr_short = (gamma_a * a + eps_d) / (gamma_a**2 * var_a + var_eps_d) + C2_D = (np.mean(np.square(rr_long)) - np.mean(np.square(rr_short))) / np.mean(np.square(rr_short)) + return np.square(C2_D / (1 + C2_D) - cf_d) + + gamma_a = minimize_scalar(f_m).x + m_long = m_short + gamma_a * a + d = m_long + eps_d + + # short and long version of g + g_partial_reg = 210 + 27.4 * z[:, 0] + 13.7 * (z[:, 1] + z[:, 2] + z[:, 3]) + + var_d = np.var(d) + + def f_g(beta_a): + g_diff = beta_a * (a - gamma_a * (var_a / var_d) * d) + y_diff = eps_y + g_diff + return np.square(np.mean(np.square(g_diff)) / np.mean(np.square(y_diff)) - cf_y) + + beta_a = minimize_scalar(f_g).x + + g_long = theta * d + g_partial_reg + 
beta_a * a + g_short = (theta + gamma_a * beta_a * var_a / var_d) * d + g_partial_reg + + y = g_long + eps_y + + oracle_values = { + "g_long": g_long, + "g_short": g_short, + "m_long": m_long, + "m_short": m_short, + "theta": theta, + "gamma_a": gamma_a, + "beta_a": beta_a, + "a": a, + "z": z, + } + + res_dict = {"x": x, "y": y, "d": d, "oracle_values": oracle_values} + + return res_dict diff --git a/doubleml/plm/datasets/dgp_pliv_CHS2015.py b/doubleml/plm/datasets/dgp_pliv_CHS2015.py new file mode 100644 index 00000000..7542803a --- /dev/null +++ b/doubleml/plm/datasets/dgp_pliv_CHS2015.py @@ -0,0 +1,108 @@ +import numpy as np +import pandas as pd +from scipy.linalg import toeplitz + +from doubleml.data import DoubleMLData +from doubleml.utils._aliases import _array_alias, _data_frame_alias, _dml_data_alias + + +def make_pliv_CHS2015(n_obs, alpha=1.0, dim_x=200, dim_z=150, return_type="DoubleMLData"): + """ + Generates data from a partially linear IV regression model used in Chernozhukov, Hansen and Spindler (2015). + The data generating process is defined as + + .. math:: + + z_i &= \\Pi x_i + \\zeta_i, + + d_i &= x_i' \\gamma + z_i' \\delta + u_i, + + y_i &= \\alpha d_i + x_i' \\beta + \\varepsilon_i, + + with + + .. math:: + + \\left(\\begin{matrix} \\varepsilon_i \\\\ u_i \\\\ \\zeta_i \\\\ x_i \\end{matrix} \\right) \\sim + \\mathcal{N}\\left(0, \\left(\\begin{matrix} 1 & 0.6 & 0 & 0 \\\\ 0.6 & 1 & 0 & 0 \\\\ + 0 & 0 & 0.25 I_{p_n^z} & 0 \\\\ 0 & 0 & 0 & \\Sigma \\end{matrix} \\right) \\right) + + where :math:`\\Sigma` is a :math:`p_n^x \\times p_n^x` matrix with entries + :math:`\\Sigma_{kj} = 0.5^{|j-k|}` and :math:`I_{p_n^z}` is the :math:`p_n^z \\times p_n^z` identity matrix. + :math:`\\beta = \\gamma` is a :math:`p_n^x`-vector with entries :math:`\\beta_j=\\frac{1}{j^2}`, + :math:`\\delta` is a :math:`p_n^z`-vector with entries :math:`\\delta_j=\\frac{1}{j^2}` + and :math:`\\Pi = (I_{p_n^z}, 0_{p_n^z \\times (p_n^x - p_n^z)})`. 
+ + Parameters + ---------- + n_obs : + The number of observations to simulate. + alpha : + The value of the causal parameter. + dim_x : + The number of covariates. + dim_z : + The number of instruments. + return_type : + If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. + + If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. + + If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d, z)``. + + References + ---------- + Chernozhukov, V., Hansen, C. and Spindler, M. (2015), Post-Selection and Post-Regularization Inference in Linear + Models with Many Controls and Instruments. American Economic Review: Papers and Proceedings, 105 (5): 486-90. + """ + assert dim_x >= dim_z + # see https://assets.aeaweb.org/asset-server/articles-attachments/aer/app/10505/P2015_1022_app.pdf + xx = np.random.multivariate_normal( + np.zeros(2), + np.array([[1.0, 0.6], [0.6, 1.0]]), + size=[ + n_obs, + ], + ) + epsilon = xx[:, 0] + u = xx[:, 1] + + sigma = toeplitz([np.power(0.5, k) for k in range(0, dim_x)]) + x = np.random.multivariate_normal( + np.zeros(dim_x), + sigma, + size=[ + n_obs, + ], + ) + + I_z = np.eye(dim_z) + xi = np.random.multivariate_normal( + np.zeros(dim_z), + 0.25 * I_z, + size=[ + n_obs, + ], + ) + + beta = [1 / (k**2) for k in range(1, dim_x + 1)] + gamma = beta + delta = [1 / (k**2) for k in range(1, dim_z + 1)] + Pi = np.hstack((I_z, np.zeros((dim_z, dim_x - dim_z)))) + + z = np.dot(x, np.transpose(Pi)) + xi + d = np.dot(x, gamma) + np.dot(z, delta) + u + y = alpha * d + np.dot(x, beta) + epsilon + + if return_type in _array_alias: + return x, y, d, z + elif return_type in _data_frame_alias + _dml_data_alias: + x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] + z_cols = [f"Z{i + 1}" for i in np.arange(dim_z)] + data = pd.DataFrame(np.column_stack((x, y, d, z)), columns=x_cols + ["y", "d"] + z_cols) + if return_type in _data_frame_alias: + return data + 
else: + return DoubleMLData(data, "y", "d", x_cols, z_cols) + else: + raise ValueError("Invalid return_type.") diff --git a/doubleml/plm/datasets/dgp_pliv_multiway_cluster_CKMS2021.py b/doubleml/plm/datasets/dgp_pliv_multiway_cluster_CKMS2021.py new file mode 100644 index 00000000..3ccec0f7 --- /dev/null +++ b/doubleml/plm/datasets/dgp_pliv_multiway_cluster_CKMS2021.py @@ -0,0 +1,199 @@ +import numpy as np +import pandas as pd +from scipy.linalg import toeplitz + +from doubleml.data import DoubleMLData +from doubleml.utils._aliases import _array_alias, _data_frame_alias, _dml_data_alias + + +def make_pliv_multiway_cluster_CKMS2021(N=25, M=25, dim_X=100, theta=1.0, return_type="DoubleMLData", **kwargs): + """ + Generates data from a partially linear IV regression model with multiway cluster sample used in Chiang et al. + (2021). The data generating process is defined as + + .. math:: + + Z_{ij} &= X_{ij}' \\xi_0 + V_{ij}, + + D_{ij} &= Z_{ij}' \\pi_{10} + X_{ij}' \\pi_{20} + v_{ij}, + + Y_{ij} &= D_{ij} \\theta + X_{ij}' \\zeta_0 + \\varepsilon_{ij}, + + with + + .. math:: + + X_{ij} &= (1 - \\omega_1^X - \\omega_2^X) \\alpha_{ij}^X + + \\omega_1^X \\alpha_{i}^X + \\omega_2^X \\alpha_{j}^X, + + \\varepsilon_{ij} &= (1 - \\omega_1^\\varepsilon - \\omega_2^\\varepsilon) \\alpha_{ij}^\\varepsilon + + \\omega_1^\\varepsilon \\alpha_{i}^\\varepsilon + \\omega_2^\\varepsilon \\alpha_{j}^\\varepsilon, + + v_{ij} &= (1 - \\omega_1^v - \\omega_2^v) \\alpha_{ij}^v + + \\omega_1^v \\alpha_{i}^v + \\omega_2^v \\alpha_{j}^v, + + V_{ij} &= (1 - \\omega_1^V - \\omega_2^V) \\alpha_{ij}^V + + \\omega_1^V \\alpha_{i}^V + \\omega_2^V \\alpha_{j}^V, + + and :math:`\\alpha_{ij}^X, \\alpha_{i}^X, \\alpha_{j}^X \\sim \\mathcal{N}(0, \\Sigma)` + where :math:`\\Sigma` is a :math:`p_x \\times p_x` matrix with entries + :math:`\\Sigma_{kj} = s_X^{|j-k|}`. + Further + + .. 
math:: + + \\left(\\begin{matrix} \\alpha_{ij}^\\varepsilon \\\\ \\alpha_{ij}^v \\end{matrix}\\right), + \\left(\\begin{matrix} \\alpha_{i}^\\varepsilon \\\\ \\alpha_{i}^v \\end{matrix}\\right), + \\left(\\begin{matrix} \\alpha_{j}^\\varepsilon \\\\ \\alpha_{j}^v \\end{matrix}\\right) + \\sim \\mathcal{N}\\left(0, \\left(\\begin{matrix} 1 & s_{\\varepsilon v} \\\\ + s_{\\varepsilon v} & 1 \\end{matrix} \\right) \\right) + + + and :math:`\\alpha_{ij}^V, \\alpha_{i}^V, \\alpha_{j}^V \\sim \\mathcal{N}(0, 1)`. + + Parameters + ---------- + N : + The number of observations (first dimension). + M : + The number of observations (second dimension). + dim_X : + The number of covariates. + theta : + The value of the causal parameter. + return_type : + If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object where + ``DoubleMLData.data`` is a ``pd.DataFrame``. + + If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. + + If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s + ``(x, y, d, cluster_vars, z)``. + **kwargs + Additional keyword arguments to set non-default values for the parameters + :math:`\\pi_{10}=1.0`, :math:`\\omega_X = \\omega_{\\varepsilon} = \\omega_V = \\omega_v = (0.25, 0.25)`, + :math:`s_X = s_{\\varepsilon v} = 0.25`, + or the :math:`p_x`-vectors :math:`\\zeta_0 = \\pi_{20} = \\xi_0` with default entries + :math:`(\\zeta_{0})_j = 0.5^j`. + + References + ---------- + Chiang, H. D., Kato K., Ma, Y. and Sasaki, Y. (2021), Multiway Cluster Robust Double/Debiased Machine Learning, + Journal of Business & Economic Statistics, + doi: `10.1080/07350015.2021.1895815 `_, + arXiv:`1909.03489 `_. 
+ """ + # additional parameters specifiable via kwargs + pi_10 = kwargs.get("pi_10", 1.0) + + xx = np.arange(1, dim_X + 1) + zeta_0 = kwargs.get("zeta_0", np.power(0.5, xx)) + pi_20 = kwargs.get("pi_20", np.power(0.5, xx)) + xi_0 = kwargs.get("xi_0", np.power(0.5, xx)) + + omega_X = kwargs.get("omega_X", np.array([0.25, 0.25])) + omega_epsilon = kwargs.get("omega_epsilon", np.array([0.25, 0.25])) + omega_v = kwargs.get("omega_v", np.array([0.25, 0.25])) + omega_V = kwargs.get("omega_V", np.array([0.25, 0.25])) + + s_X = kwargs.get("s_X", 0.25) + s_epsilon_v = kwargs.get("s_epsilon_v", 0.25) + + # use np.tile() and np.repeat() for repeating vectors in different styles, i.e., + # np.tile([v1, v2, v3], 2) [v1, v2, v3, v1, v2, v3] + # np.repeat([v1, v2, v3], 2) [v1, v1, v2, v2, v3, v3] + + alpha_V = np.random.normal(size=(N * M)) + alpha_V_i = np.repeat(np.random.normal(size=N), M) + alpha_V_j = np.tile(np.random.normal(size=M), N) + + cov_mat = np.array([[1, s_epsilon_v], [s_epsilon_v, 1]]) + alpha_eps_v = np.random.multivariate_normal( + np.zeros(2), + cov_mat, + size=[ + N * M, + ], + ) + alpha_eps = alpha_eps_v[:, 0] + alpha_v = alpha_eps_v[:, 1] + + alpha_eps_v_i = np.random.multivariate_normal( + np.zeros(2), + cov_mat, + size=[ + N, + ], + ) + alpha_eps_i = np.repeat(alpha_eps_v_i[:, 0], M) + alpha_v_i = np.repeat(alpha_eps_v_i[:, 1], M) + + alpha_eps_v_j = np.random.multivariate_normal( + np.zeros(2), + cov_mat, + size=[ + M, + ], + ) + alpha_eps_j = np.tile(alpha_eps_v_j[:, 0], N) + alpha_v_j = np.tile(alpha_eps_v_j[:, 1], N) + + cov_mat = toeplitz([np.power(s_X, k) for k in range(dim_X)]) + alpha_X = np.random.multivariate_normal( + np.zeros(dim_X), + cov_mat, + size=[ + N * M, + ], + ) + alpha_X_i = np.repeat( + np.random.multivariate_normal( + np.zeros(dim_X), + cov_mat, + size=[ + N, + ], + ), + M, + axis=0, + ) + alpha_X_j = np.tile( + np.random.multivariate_normal( + np.zeros(dim_X), + cov_mat, + size=[ + M, + ], + ), + (N, 1), + ) + + # generate 
variables + x = (1 - omega_X[0] - omega_X[1]) * alpha_X + omega_X[0] * alpha_X_i + omega_X[1] * alpha_X_j + + eps = ( + (1 - omega_epsilon[0] - omega_epsilon[1]) * alpha_eps + omega_epsilon[0] * alpha_eps_i + omega_epsilon[1] * alpha_eps_j + ) + + v = (1 - omega_v[0] - omega_v[1]) * alpha_v + omega_v[0] * alpha_v_i + omega_v[1] * alpha_v_j + + V = (1 - omega_V[0] - omega_V[1]) * alpha_V + omega_V[0] * alpha_V_i + omega_V[1] * alpha_V_j + + z = np.matmul(x, xi_0) + V + d = z * pi_10 + np.matmul(x, pi_20) + v + y = d * theta + np.matmul(x, zeta_0) + eps + + cluster_cols = ["cluster_var_i", "cluster_var_j"] + cluster_vars = pd.MultiIndex.from_product([range(N), range(M)]).to_frame(name=cluster_cols).reset_index(drop=True) + + if return_type in _array_alias: + return x, y, d, cluster_vars.values, z + elif return_type in _data_frame_alias + _dml_data_alias: + x_cols = [f"X{i + 1}" for i in np.arange(dim_X)] + data = pd.concat((cluster_vars, pd.DataFrame(np.column_stack((x, y, d, z)), columns=x_cols + ["Y", "D", "Z"])), axis=1) + if return_type in _data_frame_alias: + return data + else: + return DoubleMLData(data, y_col="Y", d_cols="D", cluster_cols=cluster_cols, x_cols=x_cols, z_cols="Z") + else: + raise ValueError("Invalid return_type.") diff --git a/doubleml/plm/datasets/dgp_plr_CCDDHNR2018.py b/doubleml/plm/datasets/dgp_plr_CCDDHNR2018.py new file mode 100644 index 00000000..7d6fdf9e --- /dev/null +++ b/doubleml/plm/datasets/dgp_plr_CCDDHNR2018.py @@ -0,0 +1,108 @@ +import numpy as np +import pandas as pd +from scipy.linalg import toeplitz + +from doubleml.data import DoubleMLData +from doubleml.utils._aliases import _get_array_alias, _get_data_frame_alias, _get_dml_data_alias + +_array_alias = _get_array_alias() +_data_frame_alias = _get_data_frame_alias() +_dml_data_alias = _get_dml_data_alias() + + +def make_plr_CCDDHNR2018(n_obs=500, dim_x=20, alpha=0.5, return_type="DoubleMLData", **kwargs): + """ + Generates data from a partially linear regression model used 
in Chernozhukov et al. (2018) for Figure 1. + The data generating process is defined as + + .. math:: + + d_i &= m_0(x_i) + s_1 v_i, & &v_i \\sim \\mathcal{N}(0,1), + + y_i &= \\alpha d_i + g_0(x_i) + s_2 \\zeta_i, & &\\zeta_i \\sim \\mathcal{N}(0,1), + + + with covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` is a matrix with entries + :math:`\\Sigma_{kj} = 0.7^{|j-k|}`. + The nuisance functions are given by + + .. math:: + + m_0(x_i) &= a_0 x_{i,1} + a_1 \\frac{\\exp(x_{i,3})}{1+\\exp(x_{i,3})}, + + g_0(x_i) &= b_0 \\frac{\\exp(x_{i,1})}{1+\\exp(x_{i,1})} + b_1 x_{i,3}. + + Parameters + ---------- + n_obs : + The number of observations to simulate. + dim_x : + The number of covariates. + alpha : + The value of the causal parameter. + return_type : + If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. + + If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. + + If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d)``. + **kwargs + Additional keyword arguments to set non-default values for the parameters + :math:`a_0=1`, :math:`a_1=0.25`, :math:`s_1=1`, :math:`b_0=1`, :math:`b_1=0.25` or :math:`s_2=1`. + + References + ---------- + Chernozhukov, V., Chetverikov, D., Demirer, M., Duflo, E., Hansen, C., Newey, W. and Robins, J. (2018), + Double/debiased machine learning for treatment and structural parameters. The Econometrics Journal, 21: C1-C68. + doi:`10.1111/ectj.12097 `_. 
+ """ + a_0 = kwargs.get("a_0", 1.0) + a_1 = kwargs.get("a_1", 0.25) + s_1 = kwargs.get("s_1", 1.0) + + b_0 = kwargs.get("b_0", 1.0) + b_1 = kwargs.get("b_1", 0.25) + s_2 = kwargs.get("s_2", 1.0) + + cov_mat = toeplitz([np.power(0.7, k) for k in range(dim_x)]) + x = np.random.multivariate_normal( + np.zeros(dim_x), + cov_mat, + size=[ + n_obs, + ], + ) + + d = ( + a_0 * x[:, 0] + + a_1 * np.divide(np.exp(x[:, 2]), 1 + np.exp(x[:, 2])) + + s_1 + * np.random.standard_normal( + size=[ + n_obs, + ] + ) + ) + y = ( + alpha * d + + b_0 * np.divide(np.exp(x[:, 0]), 1 + np.exp(x[:, 0])) + + b_1 * x[:, 2] + + s_2 + * np.random.standard_normal( + size=[ + n_obs, + ] + ) + ) + + if return_type in _array_alias: + return x, y, d + elif return_type in _data_frame_alias + _dml_data_alias: + x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] + data = pd.DataFrame(np.column_stack((x, y, d)), columns=x_cols + ["y", "d"]) + if return_type in _data_frame_alias: + return data + else: + return DoubleMLData(data, "y", "d", x_cols) + else: + raise ValueError("Invalid return_type.") diff --git a/doubleml/plm/datasets/dgp_plr_turrell2018.py b/doubleml/plm/datasets/dgp_plr_turrell2018.py new file mode 100644 index 00000000..5cfefdd8 --- /dev/null +++ b/doubleml/plm/datasets/dgp_plr_turrell2018.py @@ -0,0 +1,107 @@ +import numpy as np +import pandas as pd +from sklearn.datasets import make_spd_matrix + +from doubleml.data import DoubleMLData +from doubleml.utils._aliases import _get_array_alias, _get_data_frame_alias, _get_dml_data_alias + +_array_alias = _get_array_alias() +_data_frame_alias = _get_data_frame_alias() +_dml_data_alias = _get_dml_data_alias() + + +def _g(x): + return np.power(np.sin(x), 2) + + +def _m(x, nu=0.0, gamma=1.0): + return 0.5 / np.pi * (np.sinh(gamma)) / (np.cosh(gamma) - np.cos(x - nu)) + + +def make_plr_turrell2018(n_obs=100, dim_x=20, theta=0.5, return_type="DoubleMLData", **kwargs): + """ + Generates data from a partially linear regression model used in a blog 
article by Turrell (2018). + The data generating process is defined as + + .. math:: + + d_i &= m_0(x_i' b) + v_i, & &v_i \\sim \\mathcal{N}(0,1), + + y_i &= \\theta d_i + g_0(x_i' b) + u_i, & &u_i \\sim \\mathcal{N}(0,1), + + + with covariates :math:`x_i \\sim \\mathcal{N}(0, \\Sigma)`, where :math:`\\Sigma` is a random symmetric, + positive-definite matrix generated with :py:meth:`sklearn.datasets.make_spd_matrix`. + :math:`b` is a vector with entries :math:`b_j=\\frac{1}{j}` and the nuisance functions are given by + + .. math:: + + m_0(x_i) &= \\frac{1}{2 \\pi} \\frac{\\sinh(\\gamma)}{\\cosh(\\gamma) - \\cos(x_i-\\nu)}, + + g_0(x_i) &= \\sin(x_i)^2. + + Parameters + ---------- + n_obs : + The number of observations to simulate. + dim_x : + The number of covariates. + theta : + The value of the causal parameter. + return_type : + If ``'DoubleMLData'`` or ``DoubleMLData``, returns a ``DoubleMLData`` object. + + If ``'DataFrame'``, ``'pd.DataFrame'`` or ``pd.DataFrame``, returns a ``pd.DataFrame``. + + If ``'array'``, ``'np.ndarray'``, ``'np.array'`` or ``np.ndarray``, returns ``np.ndarray``'s ``(x, y, d)``. + **kwargs + Additional keyword arguments to set non-default values for the parameters + :math:`\\nu=0`, or :math:`\\gamma=1`. + + References + ---------- + Turrell, A. (2018), Econometrics in Python part I - Double machine learning, Markov Wanderer: A blog on economics, + science, coding and data. `https://aeturrell.com/blog/posts/econometrics-in-python-parti-ml/ + `_. 
+ """ + nu = kwargs.get("nu", 0.0) + gamma = kwargs.get("gamma", 1.0) + + b = [1 / k for k in range(1, dim_x + 1)] + sigma = make_spd_matrix(dim_x) + + x = np.random.multivariate_normal( + np.zeros(dim_x), + sigma, + size=[ + n_obs, + ], + ) + G = _g(np.dot(x, b)) + M = _m(np.dot(x, b), nu=nu, gamma=gamma) + d = M + np.random.standard_normal( + size=[ + n_obs, + ] + ) + y = ( + np.dot(theta, d) + + G + + np.random.standard_normal( + size=[ + n_obs, + ] + ) + ) + + if return_type in _array_alias: + return x, y, d + elif return_type in _data_frame_alias + _dml_data_alias: + x_cols = [f"X{i + 1}" for i in np.arange(dim_x)] + data = pd.DataFrame(np.column_stack((x, y, d)), columns=x_cols + ["y", "d"]) + if return_type in _data_frame_alias: + return data + else: + return DoubleMLData(data, "y", "d", x_cols) + else: + raise ValueError("Invalid return_type.") diff --git a/doubleml/plm/pliv.py b/doubleml/plm/pliv.py index fdf4e28d..385d5c67 100644 --- a/doubleml/plm/pliv.py +++ b/doubleml/plm/pliv.py @@ -62,7 +62,7 @@ class DoubleMLPLIV(LinearScoreMixin, DoubleML): -------- >>> import numpy as np >>> import doubleml as dml - >>> from doubleml.datasets import make_pliv_CHS2015 + >>> from doubleml.plm.datasets import make_pliv_CHS2015 >>> from sklearn.ensemble import RandomForestRegressor >>> from sklearn.base import clone >>> np.random.seed(3141) @@ -108,6 +108,7 @@ def __init__( super().__init__(obj_dml_data, n_folds, n_rep, score, draw_sample_splitting) self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data self.partialX = True self.partialZ = False self._check_score(self.score) diff --git a/doubleml/plm/plr.py b/doubleml/plm/plr.py index 30ad763e..db6b5a48 100644 --- a/doubleml/plm/plr.py +++ b/doubleml/plm/plr.py @@ -60,7 +60,7 @@ class DoubleMLPLR(LinearScoreMixin, DoubleML): -------- >>> import numpy as np >>> import doubleml as dml - >>> from doubleml.datasets import make_plr_CCDDHNR2018 + >>> from doubleml.plm.datasets import 
make_plr_CCDDHNR2018 >>> from sklearn.ensemble import RandomForestRegressor >>> from sklearn.base import clone >>> np.random.seed(3141) @@ -93,6 +93,7 @@ def __init__( super().__init__(obj_dml_data, n_folds, n_rep, score, draw_sample_splitting) self._check_data(self._dml_data) + self._is_cluster_data = self._dml_data.is_cluster_data valid_scores = ["IV-type", "partialling out"] _check_score(self.score, valid_scores, allow_callable=True) diff --git a/doubleml/plm/tests/conftest.py b/doubleml/plm/tests/conftest.py index 497d6fc9..cfde0f41 100644 --- a/doubleml/plm/tests/conftest.py +++ b/doubleml/plm/tests/conftest.py @@ -4,7 +4,7 @@ from scipy.linalg import toeplitz from sklearn.datasets import make_spd_matrix -from doubleml.datasets import make_pliv_CHS2015, make_plr_turrell2018 +from doubleml.plm.datasets import make_pliv_CHS2015, make_plr_turrell2018 def _g(x): diff --git a/doubleml/plm/tests/test_pliv_external_predictions.py b/doubleml/plm/tests/test_pliv_external_predictions.py index bc8a1e8a..55c362ab 100644 --- a/doubleml/plm/tests/test_pliv_external_predictions.py +++ b/doubleml/plm/tests/test_pliv_external_predictions.py @@ -5,7 +5,7 @@ from sklearn.linear_model import LinearRegression from doubleml import DoubleMLData, DoubleMLPLIV -from doubleml.datasets import make_pliv_CHS2015 +from doubleml.plm.datasets import make_pliv_CHS2015 from doubleml.utils import DMLDummyRegressor diff --git a/doubleml/plm/tests/test_plr.py b/doubleml/plm/tests/test_plr.py index 79f21f84..65f5ad83 100644 --- a/doubleml/plm/tests/test_plr.py +++ b/doubleml/plm/tests/test_plr.py @@ -304,7 +304,7 @@ def test_dml_plr_cate_gate(score, cov_type): # collect data np.random.seed(42) - obj_dml_data = dml.datasets.make_plr_CCDDHNR2018(n_obs=n) + obj_dml_data = dml.plm.datasets.make_plr_CCDDHNR2018(n_obs=n) ml_l = LinearRegression() ml_g = LinearRegression() ml_m = LinearRegression() diff --git a/doubleml/plm/tests/test_plr_external_predictions.py 
b/doubleml/plm/tests/test_plr_external_predictions.py index 47644555..160052b1 100644 --- a/doubleml/plm/tests/test_plr_external_predictions.py +++ b/doubleml/plm/tests/test_plr_external_predictions.py @@ -5,7 +5,7 @@ from sklearn.linear_model import LinearRegression from doubleml import DoubleMLData, DoubleMLPLR -from doubleml.datasets import make_plr_CCDDHNR2018 +from doubleml.plm.datasets import make_plr_CCDDHNR2018 from doubleml.utils import DMLDummyRegressor diff --git a/doubleml/rdd/rdd.py b/doubleml/rdd/rdd.py index 34550142..f9811c9c 100644 --- a/doubleml/rdd/rdd.py +++ b/doubleml/rdd/rdd.py @@ -7,7 +7,7 @@ from sklearn.base import clone from sklearn.utils.multiclass import type_of_target -from doubleml import DoubleMLData +from doubleml import DoubleMLRDDData from doubleml.double_ml import DoubleML from doubleml.rdd._utils import _is_rdrobust_available from doubleml.utils._checks import _check_resampling_specification, _check_supports_sample_weights @@ -22,8 +22,8 @@ class RDFlex: Parameters ---------- - obj_dml_data : :class:`DoubleMLData` object - The :class:`DoubleMLData` object providing the data and specifying the variables for the causal model. + obj_dml_data : :class:`DoubleMLRDDData` object + The :class:`DoubleMLRDDData` object providing the data and specifying the variables for the causal model. ml_g : estimator implementing ``fit()`` and ``predict()`` A machine learner implementing ``fit()`` and ``predict()`` methods and support ``sample_weights`` (e.g. @@ -82,7 +82,12 @@ class RDFlex: >>> from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier >>> np.random.seed(123) >>> data_dict = make_simple_rdd_data(fuzzy=True) - >>> obj_dml_data = dml.DoubleMLData.from_arrays(x=data_dict["X"], y=data_dict["Y"], d=data_dict["D"], s=data_dict["score"]) + >>> obj_dml_data = dml.DoubleMLRDDData.from_arrays( + ... x=data_dict["X"], + ... y=data_dict["Y"], + ... d=data_dict["D"], + ... s=data_dict["score"] + ... 
) >>> ml_g = RandomForestRegressor() >>> ml_m = RandomForestClassifier() >>> rdflex_obj = dml.rdd.RDFlex(obj_dml_data, ml_g, ml_m, fuzzy=True) @@ -114,8 +119,9 @@ def __init__( self._check_data(obj_dml_data, cutoff) self._dml_data = obj_dml_data + self._is_cluster_data = self._dml_data.is_cluster_data - self._score = self._dml_data.s - cutoff + self._score = self._dml_data.score - cutoff self._cutoff = cutoff self._intendend_treatment = (self._score >= 0).astype(bool) self._fuzzy = fuzzy @@ -150,7 +156,6 @@ def __init__( "Iterative bandwidth selection will be overwritten by provided values." ) ) - self.kwargs = kwargs self._smpls = DoubleMLResampling( @@ -468,7 +473,11 @@ def _fit_rdd(self, h=None, b=None): ) else: rdd_res = rdrobust.rdrobust( - y=self._M_Y[:, self._i_rep], x=self._score, fuzzy=None, c=0, **({"h": h, "b": b} | self.kwargs) + y=self._M_Y[:, self._i_rep], + x=self._score, + fuzzy=None, + c=0, + **({"h": h, "b": b} | self.kwargs), ) return rdd_res @@ -495,21 +504,24 @@ def _initialize_arrays(self): return M_Y, M_D, h, rdd_obj, all_coef, all_se, all_ci def _check_data(self, obj_dml_data, cutoff): - if not isinstance(obj_dml_data, DoubleMLData): + if not isinstance(obj_dml_data, DoubleMLRDDData): raise TypeError( - f"The data must be of DoubleMLData type. {str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed." + f"The data must be of DoubleMLRDDData type. {str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed." ) + if obj_dml_data.is_cluster_data: + raise NotImplementedError("Clustered data is not supported for RDFlex yet.") + # score checks - if obj_dml_data.s_col is None: + if obj_dml_data.score_col is None: raise ValueError("Incompatible data. " + "Score variable has not been set. ") - is_continuous = type_of_target(obj_dml_data.s) == "continuous" + is_continuous = type_of_target(obj_dml_data.score) == "continuous" if not is_continuous: raise ValueError("Incompatible data. " + "Score variable has to be continuous. 
") if not isinstance(cutoff, (int, float)): raise TypeError(f"Cutoff value has to be a float or int. Object of type {str(type(cutoff))} passed.") - if not (obj_dml_data.s.min() <= cutoff <= obj_dml_data.s.max()): + if not (obj_dml_data.score.min() <= cutoff <= obj_dml_data.score.max()): raise ValueError("Cutoff value is not within the range of the score variable. ") # treatment checks diff --git a/doubleml/rdd/tests/conftest.py b/doubleml/rdd/tests/conftest.py index b279ea93..9d13deaf 100644 --- a/doubleml/rdd/tests/conftest.py +++ b/doubleml/rdd/tests/conftest.py @@ -3,7 +3,7 @@ import pytest from sklearn.dummy import DummyClassifier, DummyRegressor -from doubleml import DoubleMLData +from doubleml import DoubleMLRDDData from doubleml.rdd import RDFlex from doubleml.rdd._utils import _is_rdrobust_available from doubleml.rdd.datasets import make_simple_rdd_data @@ -24,7 +24,7 @@ def predict_dummy(): - make predictions using rdrobust as a reference """ - def _predict_dummy(data: DoubleMLData, cutoff, alpha, n_rep, p, fs_specification, ml_g=ml_g_dummy): + def _predict_dummy(data: DoubleMLRDDData, cutoff, alpha, n_rep, p, fs_specification, ml_g=ml_g_dummy): dml_rdflex = RDFlex( data, ml_g=ml_g, ml_m=ml_m_dummy, cutoff=cutoff, n_rep=n_rep, p=p, fs_specification=fs_specification ) @@ -35,7 +35,7 @@ def _predict_dummy(data: DoubleMLData, cutoff, alpha, n_rep, p, fs_specification msg = "rdrobust is not installed. 
Please install it using 'pip install DoubleML[rdd]'" raise ImportError(msg) - rdrobust_model = rdrobust.rdrobust(y=data.y, x=data.s, c=cutoff, level=100 * (1 - alpha), p=p) + rdrobust_model = rdrobust.rdrobust(y=data.y, x=data.score, c=cutoff, level=100 * (1 - alpha), p=p) reference = { "model": rdrobust_model, @@ -81,7 +81,7 @@ def generate_data(n_obs: int, fuzzy: str, cutoff: float, binary_outcome: bool = columns = ["y", "d", "score"] + ["x" + str(i) for i in range(data["X"].shape[1])] df = pd.DataFrame(np.column_stack((data["Y"], data["D"], data["score"], data["X"])), columns=columns) - return DoubleMLData(df, y_col="y", d_cols="d", s_col="score") + return DoubleMLRDDData(df, y_col="y", d_cols="d", score_col="score") @pytest.fixture(scope="module") diff --git a/doubleml/rdd/tests/test_rdd_classifier.py b/doubleml/rdd/tests/test_rdd_classifier.py index 199fe327..1103b957 100644 --- a/doubleml/rdd/tests/test_rdd_classifier.py +++ b/doubleml/rdd/tests/test_rdd_classifier.py @@ -18,7 +18,7 @@ np.column_stack((data["Y_bin"], data["D"], data["score"], data["X"])), columns=["y", "d", "score"] + ["x" + str(i) for i in range(data["X"].shape[1])], ) -dml_data = dml.DoubleMLData(df, y_col="y", d_cols="d", s_col="score") +dml_data = dml.DoubleMLRDDData(df, y_col="y", d_cols="d", score_col="score") @pytest.mark.ci_rdd diff --git a/doubleml/rdd/tests/test_rdd_default_values.py b/doubleml/rdd/tests/test_rdd_default_values.py index 2f0657f1..b2fdcf29 100644 --- a/doubleml/rdd/tests/test_rdd_default_values.py +++ b/doubleml/rdd/tests/test_rdd_default_values.py @@ -15,7 +15,7 @@ np.column_stack((data["Y"], data["D"], data["score"], data["X"])), columns=["y", "d", "score"] + ["x" + str(i) for i in range(data["X"].shape[1])], ) -dml_data = dml.DoubleMLData(df, y_col="y", d_cols="d", s_col="score") +dml_data = dml.DoubleMLRDDData(df, y_col="y", d_cols="d", score_col="score") def _assert_resampling_default_settings(dml_obj): diff --git a/doubleml/rdd/tests/test_rdd_exceptions.py 
b/doubleml/rdd/tests/test_rdd_exceptions.py index d1b6e01a..30153151 100644 --- a/doubleml/rdd/tests/test_rdd_exceptions.py +++ b/doubleml/rdd/tests/test_rdd_exceptions.py @@ -6,7 +6,7 @@ from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin from sklearn.linear_model import Lasso, LogisticRegression -from doubleml import DoubleMLData +from doubleml import DoubleMLRDDData from doubleml.rdd import RDFlex from doubleml.rdd.datasets import make_simple_rdd_data @@ -17,7 +17,7 @@ columns=["y", "d", "score"] + ["x" + str(i) for i in range(data["X"].shape[1])], ) -dml_data = DoubleMLData(df, y_col="y", d_cols="d", s_col="score") +dml_data = DoubleMLRDDData(df, y_col="y", d_cols="d", score_col="score") ml_g = Lasso() ml_m = LogisticRegression() @@ -58,21 +58,28 @@ def predict_proba(self, X): @pytest.mark.ci_rdd def test_rdd_exception_data(): - # DoubleMLData - msg = r"The data must be of DoubleMLData type. \[\] of type was passed." + # DoubleMLRDDData + msg = r"The data must be of DoubleMLRDDData type. \[\] of type was passed." with pytest.raises(TypeError, match=msg): _ = RDFlex([], ml_g) + # Clusters not implemented + msg = "Clustered data is not supported for RDFlex yet." + with pytest.raises(NotImplementedError, match=msg): + dml_data_clusters = copy.deepcopy(dml_data) + dml_data_clusters._is_cluster_data = True + _ = RDFlex(dml_data_clusters, ml_g, ml_m) + # score column msg = "Incompatible data. Score variable has not been set. " with pytest.raises(ValueError, match=msg): tmp_dml_data = copy.deepcopy(dml_data) - tmp_dml_data._s_col = None + tmp_dml_data._score_col = None _ = RDFlex(tmp_dml_data, ml_g) msg = "Incompatible data. Score variable has to be continuous. 
" with pytest.raises(ValueError, match=msg): tmp_dml_data = copy.deepcopy(dml_data) - tmp_dml_data._s = tmp_dml_data._d + tmp_dml_data._score = tmp_dml_data._d _ = RDFlex(tmp_dml_data, ml_g) # existing instruments @@ -128,7 +135,7 @@ def test_rdd_warning_treatment_assignment(): ) with pytest.warns(UserWarning, match=msg): tmp_dml_data = copy.deepcopy(dml_data) - tmp_dml_data._s = -1.0 * tmp_dml_data._s + tmp_dml_data._score = -1.0 * tmp_dml_data._score _ = RDFlex(tmp_dml_data, ml_g, ml_m, fuzzy=True) @@ -169,7 +176,7 @@ def test_rdd_exception_learner(): ) with pytest.warns(UserWarning, match=msg): tmp_dml_data = copy.deepcopy(dml_data) - tmp_dml_data._data["sharp_d"] = tmp_dml_data.s >= 0 + tmp_dml_data._data["sharp_d"] = tmp_dml_data.score >= 0 tmp_dml_data.d_cols = "sharp_d" _ = RDFlex(tmp_dml_data, ml_g, ml_m, fuzzy=False) diff --git a/doubleml/rdd/tests/test_rdd_return_types.py b/doubleml/rdd/tests/test_rdd_return_types.py index 13248afd..f7e02427 100644 --- a/doubleml/rdd/tests/test_rdd_return_types.py +++ b/doubleml/rdd/tests/test_rdd_return_types.py @@ -15,7 +15,7 @@ np.column_stack((data["Y"], data["D"], data["score"], data["X"])), columns=["y", "d", "score"] + ["x" + str(i) for i in range(data["X"].shape[1])], ) -dml_data = dml.DoubleMLData(df, y_col="y", d_cols="d", s_col="score") +dml_data = dml.DoubleMLRDDData(df, y_col="y", d_cols="d", score_col="score") def _assert_return_types(dml_obj): diff --git a/doubleml/tests/_utils.py b/doubleml/tests/_utils.py index a241b58a..907d03d1 100644 --- a/doubleml/tests/_utils.py +++ b/doubleml/tests/_utils.py @@ -11,6 +11,7 @@ class DummyDataClass(DoubleMLBaseData): def __init__(self, data): DoubleMLBaseData.__init__(self, data) + self.is_cluster_data = False @property def n_coefs(self): diff --git a/doubleml/tests/conftest.py b/doubleml/tests/conftest.py index bf53d788..6abea18c 100644 --- a/doubleml/tests/conftest.py +++ b/doubleml/tests/conftest.py @@ -4,7 +4,7 @@ from sklearn.datasets import make_classification, 
make_regression, make_spd_matrix from doubleml import DoubleMLData -from doubleml.datasets import make_pliv_CHS2015, make_plr_turrell2018 +from doubleml.plm.datasets import make_pliv_CHS2015, make_plr_turrell2018 def _g(x): diff --git a/doubleml/tests/test_datasets.py b/doubleml/tests/test_datasets.py index 67f612e8..f69b681e 100644 --- a/doubleml/tests/test_datasets.py +++ b/doubleml/tests/test_datasets.py @@ -2,22 +2,23 @@ import pandas as pd import pytest -from doubleml import DoubleMLClusterData, DoubleMLData -from doubleml.datasets import ( - _make_pliv_data, - fetch_401K, - fetch_bonus, +from doubleml import DoubleMLData +from doubleml.datasets import fetch_401K, fetch_bonus +from doubleml.irm.datasets import ( make_confounded_irm_data, - make_confounded_plr_data, make_heterogeneous_data, make_iivm_data, make_irm_data, make_irm_data_discrete_treatments, + make_ssm_data, +) +from doubleml.plm.datasets import ( + _make_pliv_data, + make_confounded_plr_data, make_pliv_CHS2015, make_pliv_multiway_cluster_CKMS2021, make_plr_CCDDHNR2018, make_plr_turrell2018, - make_ssm_data, ) msg_inv_return_type = "Invalid return_type." 
@@ -150,8 +151,8 @@ def test_make_pliv_CHS2015_return_types(): @pytest.mark.ci def test_make_pliv_multiway_cluster_CKMS2021_return_types(): np.random.seed(3141) - res = make_pliv_multiway_cluster_CKMS2021(N=10, M=10, return_type="DoubleMLClusterData") - assert isinstance(res, DoubleMLClusterData) + res = make_pliv_multiway_cluster_CKMS2021(N=10, M=10, return_type="DoubleMLData") + assert isinstance(res, DoubleMLData) res = make_pliv_multiway_cluster_CKMS2021(N=10, M=10, return_type="DataFrame") assert isinstance(res, pd.DataFrame) x, y, d, cluster_vars, z = make_pliv_multiway_cluster_CKMS2021(N=10, M=10, return_type="array") diff --git a/doubleml/tests/test_evaluate_learner.py b/doubleml/tests/test_evaluate_learner.py index dbad9b62..2c5d3f9a 100644 --- a/doubleml/tests/test_evaluate_learner.py +++ b/doubleml/tests/test_evaluate_learner.py @@ -5,7 +5,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression import doubleml as dml -from doubleml.datasets import make_irm_data +from doubleml.irm.datasets import make_irm_data from doubleml.utils._estimation import _logloss np.random.seed(3141) diff --git a/doubleml/tests/test_exceptions.py b/doubleml/tests/test_exceptions.py index a4655bb9..56cb61dc 100644 --- a/doubleml/tests/test_exceptions.py +++ b/doubleml/tests/test_exceptions.py @@ -8,11 +8,11 @@ from doubleml import ( DoubleMLBLP, - DoubleMLClusterData, DoubleMLCVAR, DoubleMLData, DoubleMLDID, DoubleMLDIDCS, + DoubleMLDIDData, DoubleMLIIVM, DoubleMLIRM, DoubleMLLPQ, @@ -21,14 +21,9 @@ DoubleMLPQ, DoubleMLQTE, ) -from doubleml.datasets import ( - make_iivm_data, - make_irm_data, - make_pliv_CHS2015, - make_pliv_multiway_cluster_CKMS2021, - make_plr_CCDDHNR2018, -) from doubleml.did.datasets import make_did_SZ2020 +from doubleml.irm.datasets import make_iivm_data, make_irm_data +from doubleml.plm.datasets import make_pliv_CHS2015, make_pliv_multiway_cluster_CKMS2021, make_plr_CCDDHNR2018 from ._utils import DummyDataClass @@ -47,6 +42,7 @@ 
dml_data_irm = make_irm_data(n_obs=n) dml_data_iivm = make_iivm_data(n_obs=n) +dml_data_iivm_did = DoubleMLDIDData(dml_data_iivm.data, y_col="y", d_cols="d", z_cols="z") dml_cluster_data_pliv = make_pliv_multiway_cluster_CKMS2021(N=10, M=10) dml_data_did = make_did_SZ2020(n_obs=n) dml_data_did_cs = make_did_SZ2020(n_obs=n, cross_sectional_data=True) @@ -59,7 +55,10 @@ @pytest.mark.ci def test_doubleml_exception_data(): - msg = "The data must be of DoubleMLData or DoubleMLClusterData type." + msg = ( + "The data must be of DoubleMLData or DoubleMLClusterData or DoubleMLDIDData or " + "DoubleMLSSMData or DoubleMLRDDData type." + ) with pytest.raises(TypeError, match=msg): _ = DoubleMLPLR(pd.DataFrame(), ml_l, ml_m) @@ -80,10 +79,10 @@ def test_doubleml_exception_data(): _ = DoubleMLCVAR(DummyDataClass(pd.DataFrame(np.zeros((100, 10)))), ml_g, ml_m, treatment=1) with pytest.raises(TypeError, match=msg): _ = DoubleMLQTE(DummyDataClass(pd.DataFrame(np.zeros((100, 10)))), ml_g, ml_m) - msg = "For repeated outcomes the data must be of DoubleMLData type." + msg = "For repeated outcomes the data must be of DoubleMLDIDData type." with pytest.raises(TypeError, match=msg): _ = DoubleMLDID(DummyDataClass(pd.DataFrame(np.zeros((100, 10)))), ml_g, ml_m) - msg = "For repeated cross sections the data must be of DoubleMLData type. " + msg = "For repeated cross sections the data must be of DoubleMLDIDData type. " with pytest.raises(TypeError, match=msg): _ = DoubleMLDIDCS(DummyDataClass(pd.DataFrame(np.zeros((100, 10)))), ml_g, ml_m) @@ -241,7 +240,7 @@ def test_doubleml_exception_data(): # DID with IV msg = r"Incompatible data. z have been set as instrumental variable\(s\)." with pytest.raises(ValueError, match=msg): - _ = DoubleMLDID(dml_data_iivm, Lasso(), LogisticRegression()) + _ = DoubleMLDID(dml_data_iivm_did, Lasso(), LogisticRegression()) msg = ( "Incompatible data. 
To fit an DID model with DML exactly one binary variable with values 0 and 1 " "needs to be specified as treatment variable." @@ -250,16 +249,16 @@ def test_doubleml_exception_data(): df_irm["d"] = df_irm["d"] * 2 with pytest.raises(ValueError, match=msg): # non-binary D for DID - _ = DoubleMLDID(DoubleMLData(df_irm, "y", "d"), Lasso(), LogisticRegression()) + _ = DoubleMLDID(DoubleMLDIDData(df_irm, "y", "d"), Lasso(), LogisticRegression()) df_irm = dml_data_irm.data.copy() with pytest.raises(ValueError, match=msg): # multiple D for DID - _ = DoubleMLDID(DoubleMLData(df_irm, "y", ["d", "X1"]), Lasso(), LogisticRegression()) + _ = DoubleMLDID(DoubleMLDIDData(df_irm, "y", ["d", "X1"]), Lasso(), LogisticRegression()) # DIDCS with IV msg = r"Incompatible data. z have been set as instrumental variable\(s\)." with pytest.raises(ValueError, match=msg): - _ = DoubleMLDIDCS(dml_data_iivm, Lasso(), LogisticRegression()) + _ = DoubleMLDIDCS(dml_data_iivm_did, Lasso(), LogisticRegression()) # DIDCS treatment exceptions msg = ( @@ -270,11 +269,11 @@ def test_doubleml_exception_data(): df_did_cs["d"] = df_did_cs["d"] * 2 with pytest.raises(ValueError, match=msg): # non-binary D for DIDCS - _ = DoubleMLDIDCS(DoubleMLData(df_did_cs, y_col="y", d_cols="d", t_col="t"), Lasso(), LogisticRegression()) + _ = DoubleMLDIDCS(DoubleMLDIDData(df_did_cs, y_col="y", d_cols="d", t_col="t"), Lasso(), LogisticRegression()) df_did_cs = dml_data_did_cs.data.copy() with pytest.raises(ValueError, match=msg): # multiple D for DIDCS - _ = DoubleMLDIDCS(DoubleMLData(df_did_cs, y_col="y", d_cols=["d", "Z1"], t_col="t"), Lasso(), LogisticRegression()) + _ = DoubleMLDIDCS(DoubleMLDIDData(df_did_cs, y_col="y", d_cols=["d", "Z1"], t_col="t"), Lasso(), LogisticRegression()) # DIDCS time exceptions msg = ( @@ -285,7 +284,7 @@ def test_doubleml_exception_data(): df_did_cs["t"] = df_did_cs["t"] * 2 with pytest.raises(ValueError, match=msg): # non-binary t for DIDCS - _ = DoubleMLDIDCS(DoubleMLData(df_did_cs, 
y_col="y", d_cols="d", t_col="t"), Lasso(), LogisticRegression()) + _ = DoubleMLDIDCS(DoubleMLDIDData(df_did_cs, y_col="y", d_cols="d", t_col="t"), Lasso(), LogisticRegression()) @pytest.mark.ci @@ -1356,7 +1355,7 @@ def test_doubleml_cluster_not_yet_implemented(): df = dml_cluster_data_pliv.data.copy() df["cluster_var_k"] = df["cluster_var_i"] + df["cluster_var_j"] - 2 - dml_cluster_data_multiway = DoubleMLClusterData( + dml_cluster_data_multiway = DoubleMLData( df, y_col="Y", d_cols="D", diff --git a/doubleml/tests/test_exceptions_ext_preds.py b/doubleml/tests/test_exceptions_ext_preds.py index 3f600282..a65b6ebb 100644 --- a/doubleml/tests/test_exceptions_ext_preds.py +++ b/doubleml/tests/test_exceptions_ext_preds.py @@ -2,7 +2,7 @@ from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from doubleml import DoubleMLCVAR, DoubleMLData, DoubleMLIRM, DoubleMLQTE -from doubleml.datasets import make_irm_data +from doubleml.irm.datasets import make_irm_data from doubleml.utils import DMLDummyClassifier, DMLDummyRegressor df_irm = make_irm_data(n_obs=10, dim_x=2, theta=0.5, return_type="DataFrame") diff --git a/doubleml/tests/test_framework.py b/doubleml/tests/test_framework.py index 24810b68..13222664 100644 --- a/doubleml/tests/test_framework.py +++ b/doubleml/tests/test_framework.py @@ -3,8 +3,8 @@ import pytest from sklearn.linear_model import LinearRegression, LogisticRegression -from doubleml.datasets import make_irm_data from doubleml.double_ml_framework import DoubleMLFramework, concat +from doubleml.irm.datasets import make_irm_data from doubleml.irm.irm import DoubleMLIRM from ._utils import generate_dml_dict diff --git a/doubleml/tests/test_model_defaults.py b/doubleml/tests/test_model_defaults.py index f55a555c..b04117eb 100644 --- a/doubleml/tests/test_model_defaults.py +++ b/doubleml/tests/test_model_defaults.py @@ -4,14 +4,9 @@ from sklearn.linear_model import Lasso, LogisticRegression import doubleml as dml -from doubleml.datasets 
import ( - make_iivm_data, - make_irm_data, - make_pliv_CHS2015, - make_plr_CCDDHNR2018, - make_ssm_data, -) from doubleml.did.datasets import make_did_SZ2020 +from doubleml.irm.datasets import make_iivm_data, make_irm_data, make_ssm_data +from doubleml.plm.datasets import make_pliv_CHS2015, make_plr_CCDDHNR2018 np.random.seed(3141) dml_data_plr = make_plr_CCDDHNR2018(n_obs=100) diff --git a/doubleml/tests/test_multiway_cluster.py b/doubleml/tests/test_multiway_cluster.py index b064024f..f22e913b 100644 --- a/doubleml/tests/test_multiway_cluster.py +++ b/doubleml/tests/test_multiway_cluster.py @@ -6,7 +6,7 @@ from sklearn.linear_model import Lasso, LinearRegression import doubleml as dml -from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021 +from doubleml.plm.datasets import make_pliv_multiway_cluster_CKMS2021 from ..plm.tests._utils_pliv_manual import compute_pliv_residuals, fit_pliv from ._utils import _clone @@ -288,7 +288,7 @@ def dml_plr_cluster_with_index(generate_data1, learner): dml_plr_obj.fit() df = data.reset_index() - dml_cluster_data = dml.DoubleMLClusterData(df, y_col="y", d_cols="d", x_cols=x_cols, cluster_cols="index") + dml_cluster_data = dml.DoubleMLData(df, y_col="y", d_cols="d", x_cols=x_cols, cluster_cols="index") np.random.seed(3141) dml_plr_cluster_obj = dml.DoubleMLPLR(dml_cluster_data, ml_l, ml_m, n_folds=n_folds) np.random.seed(3141) diff --git a/doubleml/tests/test_nonlinear_cluster.py b/doubleml/tests/test_nonlinear_cluster.py index f84f3e2e..0623b665 100644 --- a/doubleml/tests/test_nonlinear_cluster.py +++ b/doubleml/tests/test_nonlinear_cluster.py @@ -7,7 +7,8 @@ from sklearn.linear_model import Lasso, LinearRegression import doubleml as dml -from doubleml.datasets import DoubleMLClusterData, make_pliv_multiway_cluster_CKMS2021 +from doubleml import DoubleMLData +from doubleml.plm.datasets import make_pliv_multiway_cluster_CKMS2021 from .test_nonlinear_score_mixin import DoubleMLPLRWithNonLinearScoreMixin @@ -19,7 +20,7 
@@ # create data without insturment for plr x, y, d, cluster_vars, z = make_pliv_multiway_cluster_CKMS2021(N, M, dim_x, return_type="array") -obj_dml_cluster_data = DoubleMLClusterData.from_arrays(x, y, d, cluster_vars) +obj_dml_cluster_data = DoubleMLData.from_arrays(x, y, d, cluster_vars) x, y, d, cluster_vars, z = make_pliv_multiway_cluster_CKMS2021( N, @@ -31,7 +32,7 @@ omega_V=np.array([0.25, 0]), return_type="array", ) -obj_dml_oneway_cluster_data = DoubleMLClusterData.from_arrays(x, y, d, cluster_vars) +obj_dml_oneway_cluster_data = DoubleMLData.from_arrays(x, y, d, cluster_vars=cluster_vars) # only the first cluster variable is relevant with the weight setting above obj_dml_oneway_cluster_data.cluster_cols = "cluster_var1" @@ -195,7 +196,7 @@ def dml_plr_cluster_nonlinear_with_index(generate_data1, learner): dml_plr_obj.fit() df = data.reset_index() - dml_cluster_data = dml.DoubleMLClusterData(df, y_col="y", d_cols="d", x_cols=x_cols, cluster_cols="index") + dml_cluster_data = dml.DoubleMLData(df, y_col="y", d_cols="d", x_cols=x_cols, cluster_cols="index") np.random.seed(3141) dml_plr_cluster_obj = DoubleMLPLRWithNonLinearScoreMixin(dml_cluster_data, ml_l, ml_m, n_folds=n_folds) dml_plr_cluster_obj.fit() diff --git a/doubleml/tests/test_return_types.py b/doubleml/tests/test_return_types.py index 11ebd624..fe3d676d 100644 --- a/doubleml/tests/test_return_types.py +++ b/doubleml/tests/test_return_types.py @@ -8,11 +8,10 @@ from doubleml import ( DoubleMLAPO, - DoubleMLClusterData, DoubleMLCVAR, - DoubleMLData, DoubleMLDID, DoubleMLDIDCS, + DoubleMLDIDData, DoubleMLFramework, DoubleMLIIVM, DoubleMLIRM, @@ -23,15 +22,9 @@ DoubleMLPQ, DoubleMLSSM, ) -from doubleml.datasets import ( - make_iivm_data, - make_irm_data, - make_pliv_CHS2015, - make_pliv_multiway_cluster_CKMS2021, - make_plr_CCDDHNR2018, - make_ssm_data, -) from doubleml.did.datasets import make_did_SZ2020 +from doubleml.irm.datasets import make_iivm_data, make_irm_data, make_ssm_data +from 
doubleml.plm.datasets import make_pliv_CHS2015, make_pliv_multiway_cluster_CKMS2021, make_plr_CCDDHNR2018 np.random.seed(3141) n_obs = 200 @@ -44,8 +37,8 @@ dml_data_did_cs = make_did_SZ2020(n_obs=n_obs, cross_sectional_data=True) (x, y, d, t) = make_did_SZ2020(n_obs=n_obs, cross_sectional_data=True, return_type="array") binary_outcome = np.random.binomial(n=1, p=0.5, size=n_obs) -dml_data_did_binary_outcome = DoubleMLData.from_arrays(x, binary_outcome, d) -dml_data_did_cs_binary_outcome = DoubleMLData.from_arrays(x, binary_outcome, d, t=t) +dml_data_did_binary_outcome = DoubleMLDIDData.from_arrays(x, binary_outcome, d) +dml_data_did_cs_binary_outcome = DoubleMLDIDData.from_arrays(x, binary_outcome, d, t=t) dml_data_ssm = make_ssm_data(n_obs=n_obs) dml_plr = DoubleMLPLR(dml_data_plr, Lasso(), Lasso()) @@ -92,14 +85,14 @@ def test_return_types(dml_obj, cls): if not dml_obj._is_cluster_data: assert isinstance(dml_obj.set_sample_splitting(dml_obj.smpls), cls) else: - assert isinstance(dml_obj._dml_data, DoubleMLClusterData) + assert dml_obj._dml_data.is_cluster_data assert isinstance(dml_obj.fit(), cls) assert isinstance(dml_obj.__str__(), str) # called again after fit, now with numbers assert isinstance(dml_obj.summary, pd.DataFrame) # called again after fit, now with numbers if not dml_obj._is_cluster_data: assert isinstance(dml_obj.bootstrap(), cls) else: - assert isinstance(dml_obj._dml_data, DoubleMLClusterData) + assert dml_obj._dml_data.is_cluster_data assert isinstance(dml_obj.confint(), pd.DataFrame) if not dml_obj._is_cluster_data: assert isinstance(dml_obj.p_adjust(), pd.DataFrame) diff --git a/doubleml/tests/test_scores.py b/doubleml/tests/test_scores.py index c3281702..0687546d 100644 --- a/doubleml/tests/test_scores.py +++ b/doubleml/tests/test_scores.py @@ -3,7 +3,8 @@ from sklearn.linear_model import Lasso, LogisticRegression from doubleml import DoubleMLIIVM, DoubleMLIRM, DoubleMLPLIV, DoubleMLPLR -from doubleml.datasets import make_iivm_data, 
make_irm_data, make_pliv_CHS2015, make_plr_CCDDHNR2018 +from doubleml.irm.datasets import make_iivm_data, make_irm_data +from doubleml.plm.datasets import make_pliv_CHS2015, make_plr_CCDDHNR2018 np.random.seed(3141) dml_data_plr = make_plr_CCDDHNR2018(n_obs=100) diff --git a/doubleml/tests/test_sensitivity.py b/doubleml/tests/test_sensitivity.py index e4b43495..a0e47c0d 100644 --- a/doubleml/tests/test_sensitivity.py +++ b/doubleml/tests/test_sensitivity.py @@ -5,7 +5,7 @@ from sklearn.linear_model import LinearRegression, LogisticRegression import doubleml as dml -from doubleml.datasets import make_irm_data +from doubleml.irm.datasets import make_irm_data from ._utils_doubleml_sensitivity_manual import doubleml_sensitivity_benchmark_manual, doubleml_sensitivity_manual diff --git a/doubleml/tests/test_sensitivity_cluster.py b/doubleml/tests/test_sensitivity_cluster.py index 65ec0d64..3be3f409 100644 --- a/doubleml/tests/test_sensitivity_cluster.py +++ b/doubleml/tests/test_sensitivity_cluster.py @@ -5,7 +5,7 @@ from sklearn.linear_model import LinearRegression import doubleml as dml -from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021 +from doubleml.plm.datasets import make_pliv_multiway_cluster_CKMS2021 from ._utils_doubleml_sensitivity_manual import doubleml_sensitivity_benchmark_manual @@ -17,7 +17,7 @@ (x, y, d, cluster_vars, z) = make_pliv_multiway_cluster_CKMS2021(N, M, dim_x, return_type="array") -obj_dml_cluster_data = dml.DoubleMLClusterData.from_arrays(x, y, d, cluster_vars) +obj_dml_cluster_data = dml.DoubleMLData.from_arrays(x, y, d, cluster_vars=cluster_vars) (x, y, d, cluster_vars, z) = make_pliv_multiway_cluster_CKMS2021( N, @@ -29,7 +29,7 @@ omega_V=np.array([0.25, 0]), return_type="array", ) -obj_dml_oneway_cluster_data = dml.DoubleMLClusterData.from_arrays(x, y, d, cluster_vars) +obj_dml_oneway_cluster_data = dml.DoubleMLData.from_arrays(x, y, d, cluster_vars=cluster_vars) # only the first cluster variable is relevant with the weight 
setting above obj_dml_oneway_cluster_data.cluster_cols = "cluster_var1" diff --git a/doubleml/tests/test_set_ml_nuisance_params.py b/doubleml/tests/test_set_ml_nuisance_params.py index a189b184..055bcbff 100644 --- a/doubleml/tests/test_set_ml_nuisance_params.py +++ b/doubleml/tests/test_set_ml_nuisance_params.py @@ -3,7 +3,8 @@ from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from doubleml import DoubleMLCVAR, DoubleMLIIVM, DoubleMLIRM, DoubleMLLPQ, DoubleMLPLIV, DoubleMLPLR, DoubleMLPQ -from doubleml.datasets import make_iivm_data, make_irm_data, make_pliv_CHS2015, make_plr_CCDDHNR2018 +from doubleml.irm.datasets import make_iivm_data, make_irm_data +from doubleml.plm.datasets import make_pliv_CHS2015, make_plr_CCDDHNR2018 # set default and test values n_est_default = 100 diff --git a/doubleml/tests/test_set_sample_splitting.py b/doubleml/tests/test_set_sample_splitting.py index 97313a00..0995d831 100644 --- a/doubleml/tests/test_set_sample_splitting.py +++ b/doubleml/tests/test_set_sample_splitting.py @@ -3,7 +3,7 @@ from sklearn.linear_model import Lasso from doubleml import DoubleMLPLR -from doubleml.datasets import make_plr_CCDDHNR2018 +from doubleml.plm.datasets import make_plr_CCDDHNR2018 np.random.seed(3141) dml_data = make_plr_CCDDHNR2018(n_obs=10) diff --git a/doubleml/utils/_aliases.py b/doubleml/utils/_aliases.py index e52a5818..b1dcaa21 100644 --- a/doubleml/utils/_aliases.py +++ b/doubleml/utils/_aliases.py @@ -1,11 +1,22 @@ import numpy as np import pandas as pd -from doubleml.data import DoubleMLClusterData, DoubleMLData +from doubleml.data import ( + DoubleMLClusterData, + DoubleMLData, + DoubleMLDIDData, + DoubleMLPanelData, + DoubleMLRDDData, + DoubleMLSSMData, +) _array_alias = ["array", "np.ndarray", "np.array", np.ndarray] _data_frame_alias = ["DataFrame", "pd.DataFrame", pd.DataFrame] _dml_data_alias = ["DoubleMLData", DoubleMLData] +_dml_did_data_alias = ["DoubleMLDIDData", DoubleMLDIDData] +_dml_panel_data_alias = 
["DoubleMLPanelData", DoubleMLPanelData] +_dml_rdd_data_alias = ["DoubleMLRDDData", DoubleMLRDDData] +_dml_ssm_data_alias = ["DoubleMLSSMData", DoubleMLSSMData] _dml_cluster_data_alias = ["DoubleMLClusterData", DoubleMLClusterData] @@ -27,3 +38,23 @@ def _get_dml_data_alias(): def _get_dml_cluster_data_alias(): """Returns the list of DoubleMLClusterData aliases.""" return _dml_cluster_data_alias + + +def _get_dml_did_data_alias(): + """Returns the list of DoubleMLDIDData aliases.""" + return _dml_did_data_alias + + +def _get_dml_panel_data_alias(): + """Returns the list of DoubleMLPanelData aliases.""" + return _dml_panel_data_alias + + +def _get_dml_rdd_data_alias(): + """Returns the list of DoubleMLRDDData aliases.""" + return _dml_rdd_data_alias + + +def _get_dml_ssm_data_alias(): + """Returns the list of DoubleMLSSMData aliases.""" + return _dml_ssm_data_alias diff --git a/doubleml/utils/_check_return_types.py b/doubleml/utils/_check_return_types.py index 54e72833..b73e2e04 100644 --- a/doubleml/utils/_check_return_types.py +++ b/doubleml/utils/_check_return_types.py @@ -3,7 +3,6 @@ import plotly from doubleml import DoubleMLFramework -from doubleml.data import DoubleMLClusterData from doubleml.double_ml_score_mixins import NonLinearScoreMixin @@ -15,14 +14,14 @@ def check_basic_return_types(dml_obj, cls): if not dml_obj._is_cluster_data: assert isinstance(dml_obj.set_sample_splitting(dml_obj.smpls), cls) else: - assert isinstance(dml_obj._dml_data, DoubleMLClusterData) + assert dml_obj._dml_data.is_cluster_data assert isinstance(dml_obj.fit(), cls) assert isinstance(dml_obj.__str__(), str) # called again after fit, now with numbers assert isinstance(dml_obj.summary, pd.DataFrame) # called again after fit, now with numbers if not dml_obj._is_cluster_data: assert isinstance(dml_obj.bootstrap(), cls) else: - assert isinstance(dml_obj._dml_data, DoubleMLClusterData) + assert dml_obj._dml_data.is_cluster_data assert isinstance(dml_obj.confint(), pd.DataFrame) if 
not dml_obj._is_cluster_data: assert isinstance(dml_obj.p_adjust(), pd.DataFrame) @@ -61,6 +60,8 @@ def check_basic_property_types_and_shapes(dml_obj, n_obs, n_treat, n_rep, n_fold assert isinstance(dml_obj.psi, np.ndarray) assert dml_obj.psi.shape == score_dim + assert isinstance(dml_obj.psi_deriv, np.ndarray) + assert dml_obj.psi_deriv.shape == score_dim is_nonlinear = isinstance(dml_obj, NonLinearScoreMixin) if is_nonlinear: for score_element in dml_obj._score_element_names: