From 0dff5e3ce996e93040a9f6bb2bb0771ea716d8d9 Mon Sep 17 00:00:00 2001
From: nahomi
Date: Thu, 14 Mar 2024 15:57:15 -0400
Subject: [PATCH] edits to survey experiments MG

---
 guides/data-strategies/survey-experiments.bib |  12 ++++
 .../data-strategies/survey-experiments_en.qmd |  58 +++++++++++--------
 2 files changed, 46 insertions(+), 24 deletions(-)

diff --git a/guides/data-strategies/survey-experiments.bib b/guides/data-strategies/survey-experiments.bib
index 02b420c..f06a08d 100644
--- a/guides/data-strategies/survey-experiments.bib
+++ b/guides/data-strategies/survey-experiments.bib
@@ -15,6 +15,18 @@ @inproceedings{caughey2013testing
   date-added = {2016-09-13 14:31:13 +0000},
   date-modified = {2016-09-13 14:31:13 +0000}
 }
+@article{boas_norms_2019,
+  title = {Norms versus {{Action}}: {{Why Voters Fail}} to {{Sanction Malfeasance}} in {{Brazil}}},
+  shorttitle = {Norms versus {{Action}}},
+  author = {Boas, Taylor C. and Hidalgo, F. Daniel and Melo, Marcus Andr{\'e}},
+  year = {2019},
+  journal = {American Journal of Political Science},
+  volume = {63},
+  number = {2},
+  pages = {385--400},
+  issn = {1540-5907},
+  doi = {10.1111/ajps.12413}
+}
 @article{lin2013agnostic,
   title = {Agnostic notes on regression adjustments to experimental data: Reexamining Freedman's critique},
   author = {Lin, Winston},
diff --git a/guides/data-strategies/survey-experiments_en.qmd b/guides/data-strategies/survey-experiments_en.qmd
index d4c92d8..e143fef 100644
--- a/guides/data-strategies/survey-experiments_en.qmd
+++ b/guides/data-strategies/survey-experiments_en.qmd
@@ -1,7 +1,9 @@
 ---
 title: "10 Things to Know About Survey Experiments"
 author:
-  - name: "Christopher Grady"
+  - name: "Eddy S. F. Yeung (2024 revision)"
+    url: https://eddy-yeung.github.io/
+  - name: "Christopher Grady (original)"
     url: https://publish.illinois.edu/cdgrady2/
 bibliography: survey-experiments.bib
 image: survey-experiments.png
@@ -19,23 +21,23 @@ color: black;
-Survey experiments are widely used by social scientists to study individual preferences. This guide discusses the functions and considerations of survey experiments.
+Survey experiments are experiments conducted within a survey. They are widely used to measure the prevalence of sensitive attitudes and behaviors and to study complex, multidimensional preferences. This guide describes several common types of survey experiments -- conjoint experiments, priming experiments, endorsement experiments, list experiments (item count technique), and randomized response experiments -- and some key considerations for their design and analysis.
+
-# What is a survey experiment
+# What is a survey experiment?
-A survey experiment is an experiment conducted within a survey. In an experiment, a researcher randomly assigns participants to at least two experimental conditions. The researcher then treats each condition differently. Because of random assignment, any differences between the experimental conditions would result from the treatment. In a survey experiment, the randomization and treatment occur within a questionnaire.
+A survey experiment is an experiment conducted within a survey. Survey respondents are randomly assigned to two or more experimental conditions (also known as treatment arms), and because of this randomization, differences in survey responses across groups can be interpreted as effects of the difference in treatment conditions, plus some random noise.
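One minimal way to formalize this comparison (the notation below is ours, not from the guide): with respondents randomly assigned to a treatment or a control condition, the quantity usually reported is the difference in mean responses,

$$
\widehat{\Delta} = \bar{Y}_{\text{treatment}} - \bar{Y}_{\text{control}},
$$

where $\bar{Y}_{\text{treatment}}$ and $\bar{Y}_{\text{control}}$ are the average survey responses in the two randomly assigned groups, and $\widehat{\Delta}$ estimates the average effect of the treatment condition on the survey response.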
-# Why do a survey experiment
+# Why do a survey experiment?
+Survey experiments are useful when a regular survey, without experimentation, may generate biased or even nonsensical responses about individual survey respondents' attitudes, preferences, or behaviors. For example, if researchers are interested in studying the effects of some information on support for a policy, directly asking each survey respondent "how does this information affect your support for the policy?" may raise concerns about the accuracy and truthfulness of the responses. Regular surveys may similarly not be able to reliably measure complex, multidimensional preferences through individual self-reports. Direct questioning techniques may also understate the prevalence of attitudes and behaviors, such as racist attitudes and illegal activities, with which respondents do not want to be publicly associated. In these situations, survey experiments help to measure the _prevalence_ of attitudes, preferences, and behaviors for a respondent pool better than regular surveys. As with other randomized experiments, survey experiments are generally used to estimate features of the group rather than specific individuals.
-Survey experiments are useful when researchers want to learn about individual perceptions, attitudes, or behaviors. They are especially useful when a regular survey, without experimentation, may generate biased or even nonsensical responses. For example, if researchers are interested in studying the effects of policy information on individual preferences for a policy, directly asking each survey respondent "how does this information affect your attitudes toward the policy?" may raise concerns about the accuracy and truthfulness of the responses. Rather, researchers may find it useful to provide the policy information to a randomized subset of respondents, followed by comparing the policy preferences between those who are subject to the policy information and those who are not.
+
-More generally, survey experiments help to measure individual preferences. For example, when the preferences of interest are multidimensional, regular surveys may not be able to reliably measure such complex preferences through individual self-reports. Other preferences, such as racist attitudes and illegal behaviors, may be sensitive --- preferences with which respondents do not want to be publicly associated. Direct questioning techniques may thus understate the prevalence of these preferences. In these cases, survey experiments, compared to regular surveys, can be useful to address these measurement challenges.
-There are various types of survey experiments. Five of them --- conjoint experiments, priming experiments, endorsement experiments, list experiments, and randomized response --- are covered in the following sections. In a typical endorsement experiment, respondents are asked how much they support a policy. In the treatment condition, the policy is said to be endorsed by an actor or a group. In the control condition, however, this endorsement information is omitted. The average difference in support between the endorsed and unendorsed policy represents the change in support for the policy because of the endorsement of the controversial figure.
+Endorsement experiments measure attitudes toward a potentially sensitive object, usually a controversial political actor or group. In a typical endorsement experiment, respondents are asked how much they support a policy. In the treatment condition, the policy is said to be endorsed by an actor or a group. In the control condition, however, this endorsement information is omitted. The average difference in support between the endorsed and unendorsed policy represents the change in support for the policy because of the endorsement of the actor or group.
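To make that comparison concrete, here is a small illustrative sketch in Python. It is not code from the guide: the sample size, response scale, and effect size are invented, and it simply shows how the endorsement effect could be estimated as a difference in mean support between the randomly assigned conditions.

```python
import numpy as np

rng = np.random.default_rng(2024)

# Hypothetical data: 1,000 respondents, randomly assigned to hear that the
# policy is endorsed by a controversial actor (endorsed = 1) or to hear no
# endorsement (endorsed = 0). Support is measured on a 1-7 scale.
n = 1000
endorsed = rng.integers(0, 2, size=n)
support = np.clip(np.round(rng.normal(4.5, 1.5, size=n) - 0.8 * endorsed), 1, 7)

# Estimated endorsement effect: difference in mean support across conditions
effect = support[endorsed == 1].mean() - support[endorsed == 0].mean()

# Simple (Neyman-style) standard error for the difference in means
se = np.sqrt(support[endorsed == 1].var(ddof=1) / (endorsed == 1).sum()
             + support[endorsed == 0].var(ddof=1) / (endorsed == 0).sum())

print(f"Estimated endorsement effect: {effect:.2f} (SE = {se:.2f})")
```

In practice the same difference would often also be reported separately for relevant subgroups, for example by party, as in the @nicholson2012polarizing design that follows.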
@@ -135,7 +139,9 @@ For example, @nicholson2012polarizing used an endorsement experiment to study pa
> As you know, there has been a lot of talk about immigration reform policy in the news. One proposal [**backed by Barack Obama**/**backed by John McCain**] provided legal status and a path to legal citizenship for the approximately 12 million illegal immigrants currently residing in the United States. What is your view of this immigration reform policy?
-On one hand, the difference between the control condition and the Obama (McCain) condition for Democrats (Republicans) indicates in-party bias. On the other, the difference between the control condition and the Obama (McCain) condition for Republicans (Democrats) indicates out-party bias. This experiment helps researchers gauge the favorability toward the potentially sensitive item (i.e., the political actor), as other well-designed endorsement experiments also do. Because endorsement experiments preempt the need for respondents to self-report their support for a controversial object, they are especially useful in politically sensitive contexts. For example, they have been used to measure public support for militant groups (e.g., @bullock2011statistical; @lyall2013explaining).
+On one hand, the difference between the control condition and the Obama (McCain) condition for Democrats (Republicans) indicates in-party bias. On the other, the difference between the control condition and the Obama (McCain) condition for Republicans (Democrats) indicates out-party bias. This experiment helps researchers gauge the favorability toward the potentially sensitive item (i.e., the political actor).
+
+Because endorsement experiments preempt the need for respondents to self-report their support for a controversial object, they are especially useful in politically sensitive contexts. For example, they have been used to measure public support for militant groups (e.g., @bullock2011statistical; @lyall2013explaining).
For example, @kuklinski1997list studied racial animus with a list experiment. They told respondents:
@@ -173,7 +180,8 @@ _(4) a black family moving in next door_
In the above example, the fourth item was withheld from the control condition. The authors found that the mean number of items chosen in the treatment group was 2.37, compared to 1.95 in the control group. The difference of 0.42 between treatment and control suggests that 42% of respondents would be upset by a black family moving in next door.
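Written out, the estimator behind this claim is just the difference in mean item counts between the two randomly assigned groups (notation ours):

$$
\hat{\pi} = \bar{Y}_{\text{treatment}} - \bar{Y}_{\text{control}} = 2.37 - 1.95 = 0.42,
$$

where $\bar{Y}$ is the mean number of items reported in each group and $\hat{\pi}$ is the estimated proportion of respondents for whom the sensitive item applies. The subtraction works because, in expectation, the two groups differ only in whether the sensitive item appeared on the list.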
-Despite the anonymity provided by a list experiment, respondents may still worry that their response reflects their attitudes about the sensitive item. When respondents worry about a lack of anonymity, they may increase or decrease their response to portray themselves in the best light possible, rather than answer honestly [@leary1990impression]. Given this limitation, researchers have developed other types of list experiments, including _double list experiments_ and _placebo-controlled list experiments_. Interested readers may consult @glynn2013double and @riambau2019placebo for detailed discussions about their implementation, as well as how they help to overcome some of the potential pitfalls of simple list experiments.
+Despite the cover that a list experiment provides for the sensitive attitude or behavior, respondents may still worry that their response reveals their attitude toward the sensitive item: if a respondent reports that all or none of the items apply to them, their answer to every item, including the sensitive one, is fully revealed. Respondents may then inflate or deflate their count to portray themselves in a better light [@leary1990impression]. New types of list experiments, including _double list experiments_ [@glynn2013double] and _placebo-controlled list experiments_ [@riambau2019placebo], have been developed to address these concerns.
+
For example, @blair2015design studied support for militants in Nigeria with the randomized response technique. They gave respondents a die and had the respondent practice throwing it. They then told respondents:
@@ -211,7 +221,7 @@ Now throw the dice so that I cannot see what comes out. Please do not forget th
[ENUMERATOR WAIT TO TURN AROUND UNTIL RESPONDENT SAYS YES TO]: Have you thrown the dice? Have you picked it up?

Now, during the height of the conflict in 2007 and 2008, did you know any militants, like a family member, a friend, or someone you talked to on a regular basis? Please, before you answer, take note of the number you rolled on the dice.
-In expectation, one-sixth of respondents answer "yes" due to the die throw. The researcher can thus determine what percentage of respondents engaged in the sensitive behavior. Here, however, respondents might not feel that their answers to randomized response questions were truly anonymous. This is because if a respondent answered yes, the answer could have been dictated by the randomization device, but it could also signal agreement with the sensitive item.^[See @edgell1982validity and @yu2008two.] Indeed, there are other types of randomized response techniques that address this limitation, including the _repeated randomized response technique_ and the _crosswise model_. We refer interested readers to @azfar2009identifying and @jann2011asking for the logic and implementation of these techniques.
+In expectation, one-sixth of respondents answer "yes" due to the die throw. The researcher can thus determine what percentage of respondents engaged in the sensitive behavior. Here, however, respondents might not feel that their answers to randomized response questions were truly anonymous: a "yes" could have been dictated by the randomization device, but it could also signal agreement with the sensitive item.^[See @edgell1982validity and @yu2008two.] Other types of randomized response techniques, including the _repeated randomized response technique_ and the _crosswise model_, address this limitation. @azfar2009identifying and @jann2011asking explain the logic and implementation of these techniques.
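To spell out the adjustment implied here (our notation, and assuming, per the description above, that one face of the die forces a "yes" while respondents answer truthfully otherwise):

$$
\Pr(\text{yes}) = \frac{1}{6} + \frac{5}{6}\,\pi
\quad\Longrightarrow\quad
\hat{\pi} = \frac{\hat{p}_{\text{yes}} - 1/6}{5/6},
$$

where $\hat{p}_{\text{yes}}$ is the observed share of "yes" answers and $\hat{\pi}$ is the estimated proportion of respondents who knew militants. Designs that also force a "no" on another face of the die follow the same logic with a different adjustment.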