Skip to content

Commit ca2b527

Browse files
committed
adaptations to reflect the adapted nuisance estimation for the PLR IV-type score
1 parent e704544 commit ca2b527

File tree

4 files changed

+10
-10
lines changed

4 files changed

+10
-10
lines changed

doc/examples/double_ml_bonus_data.ipynb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@
9898
" 'max_features': 'sqrt',\n",
9999
" 'max_depth': 5}\n",
100100
"\n",
101-
"dml_plr_rf.set_ml_nuisance_params('ml_g', 'tg', pars)\n",
101+
"dml_plr_rf.set_ml_nuisance_params('ml_l', 'tg', pars)\n",
102102
"dml_plr_rf.set_ml_nuisance_params('ml_m', 'tg', pars)"
103103
]
104104
},
@@ -153,7 +153,7 @@
153153
" 'dml2')\n",
154154
"\n",
155155
"# set some hyperparameters for the learners\n",
156-
"dml_plr_lasso.set_ml_nuisance_params('ml_g', 'tg', {'alpha': 0.0005})\n",
156+
"dml_plr_lasso.set_ml_nuisance_params('ml_l', 'tg', {'alpha': 0.0005})\n",
157157
"dml_plr_lasso.set_ml_nuisance_params('ml_m', 'tg', {'alpha': 0.0026})"
158158
]
159159
},
@@ -265,7 +265,7 @@
265265
],
266266
"metadata": {
267267
"kernelspec": {
268-
"display_name": "Python 3",
268+
"display_name": "Python 3 (ipykernel)",
269269
"language": "python",
270270
"name": "python3"
271271
},
@@ -279,7 +279,7 @@
279279
"name": "python",
280280
"nbconvert_exporter": "python",
281281
"pygments_lexer": "ipython3",
282-
"version": "3.8.6"
282+
"version": "3.9.7"
283283
}
284284
},
285285
"nbformat": 4,

doc/guide/basics.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ other half of observations indexed with :math:`i \in I`
176176

177177
.. ipython:: python
178178
179-
def non_orth_score(y, d, g_hat, m_hat, smpls):
179+
def non_orth_score(y, d, l_hat, g_hat, m_hat, smpls):
180180
u_hat = y - g_hat
181181
psi_a = -np.multiply(d, d)
182182
psi_b = np.multiply(d, u_hat)
@@ -222,7 +222,7 @@ other half of observations indexed with :math:`i \in I`
222222

223223
.. jupyter-execute::
224224

225-
non_orth_score = function(y, d, g_hat, m_hat, smpls) {
225+
non_orth_score = function(y, d, l_hat, g_hat, m_hat, smpls) {
226226
u_hat = y - g_hat
227227
psi_a = -1*d*d
228228
psi_b = d*u_hat

doc/guide/learners.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ Without further specification of the hyperparameters, default values are used. T
8181
dml_plr_obj = dml.DoubleMLPLR(obj_dml_data,
8282
RandomForestRegressor(),
8383
RandomForestRegressor())
84-
dml_plr_obj.set_ml_nuisance_params('ml_g', 'd', {'n_estimators': 10});
84+
dml_plr_obj.set_ml_nuisance_params('ml_l', 'd', {'n_estimators': 10});
8585
print(dml_plr_obj.fit().summary)
8686
8787
Setting treatment-variable-specific or fold-specific hyperparameters:
@@ -163,7 +163,7 @@ In this case the tuning should be done externally and the parameters can then be
163163
ml_g = Lasso()
164164
ml_m = Lasso()
165165
dml_plr_obj = dml.DoubleMLPLR(dml_data, ml_g, ml_m)
166-
dml_plr_obj.set_ml_nuisance_params('ml_g', 'd', {'alpha': ml_g_tune.alpha_});
166+
dml_plr_obj.set_ml_nuisance_params('ml_l', 'd', {'alpha': ml_g_tune.alpha_});
167167
dml_plr_obj.set_ml_nuisance_params('ml_m', 'd', {'alpha': ml_m_tune.alpha_});
168168
print(dml_plr_obj.params)
169169
print(dml_plr_obj.fit().summary)

doc/guide/scores.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -280,7 +280,7 @@ can be obtained with
280280
281281
import numpy as np
282282
283-
def non_orth_score(y, d, g_hat, m_hat, smpls):
283+
def non_orth_score(y, d, l_hat, g_hat, m_hat, smpls):
284284
u_hat = y - g_hat
285285
psi_a = -np.multiply(d, d)
286286
psi_b = np.multiply(d, u_hat)
@@ -290,7 +290,7 @@ can be obtained with
290290

291291
.. jupyter-execute::
292292

293-
non_orth_score = function(y, d, g_hat, m_hat, smpls) {
293+
non_orth_score = function(y, d, l_hat, g_hat, m_hat, smpls) {
294294
u_hat = y - g_hat
295295
psi_a = -1*d*d
296296
psi_b = d*u_hat

0 commit comments

Comments
 (0)