dev-support/bin/hadoop.sh (2 additions, 0 deletions)

@@ -35,6 +35,8 @@ function personality_globals
   JIRA_ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$'
   #shellcheck disable=SC2034
   GITHUB_REPO_DEFAULT="apache/hadoop"
+  # bind-mount urandom over /dev/random so reads do not block on low entropy
+  DOCKER_EXTRAARGS=("-v" "/dev/urandom:/dev/random")

   HADOOP_HOMEBREW_DIR=${HADOOP_HOMEBREW_DIR:-$(brew --prefix 2>/dev/null)}
   if [[ -z "${HADOOP_HOMEBREW_DIR}" ]]; then
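Note that the personality only declares the extra arguments; the surrounding
Apache Yetus tooling, which sources this personality file, is what consumes
DOCKER_EXTRAARGS when it assembles its docker run command line. A minimal
sketch of what the bind mount buys, assuming any Linux image with a shell
(the image name here is illustrative):

    # Without the mount, reading /dev/random can block for a long time on an
    # entropy-starved host; with it, /dev/random is backed by the non-blocking
    # urandom pool, so the read returns immediately.
    docker run --rm -v /dev/urandom:/dev/random alpine \
      sh -c 'head -c 32 /dev/random | wc -c'    # prints 32 right away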
TestMRIntermediateDataEncryption.java (deletions only)

@@ -58,12 +58,7 @@
 public class TestMRIntermediateDataEncryption {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestMRIntermediateDataEncryption.class);
-  /**
-   * Use urandom to avoid the YarnChild process from hanging on low entropy
-   * systems.
-   */
-  private static final String JVM_SECURITY_EGD_OPT =
-      "-Djava.security.egd=file:/dev/./urandom";

   // Where MR job's input will reside.
   private static final Path INPUT_DIR = new Path("/test/input");
   // Where output goes.

@@ -115,14 +110,6 @@ public static void setupClass() throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);

-    // Set the jvm arguments.
-    conf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
-        JVM_SECURITY_EGD_OPT);
-    final String childJVMOpts = JVM_SECURITY_EGD_OPT
-        + " " + conf.get("mapred.child.java.opts", " ");
-    conf.set("mapred.child.java.opts", childJVMOpts);
-
-
     // Start the mini-MR and mini-DFS clusters.
     dfsCluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(NUM_NODES).build();
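The deleted constant and setup code were a per-test workaround for the same
low-entropy problem that the Docker-level mount above now solves for the whole
build environment. For reference, here is the old workaround expressed as a
plain command line (the jar name is illustrative), plus a quick way to inspect
the host's entropy pool on Linux:

    # java.security.egd points SecureRandom's seed source at urandom; the
    # extra /./ is the conventional trick to defeat the JDK's special-casing
    # of the exact string file:/dev/urandom
    java -Djava.security.egd=file:/dev/./urandom -jar mr-test.jar

    # how much entropy the kernel currently has available
    cat /proc/sys/kernel/random/entropy_avail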
start-build-env.sh (16 additions, 6 deletions)

@@ -87,10 +87,20 @@ DOCKER_INTERACTIVE_RUN=${DOCKER_INTERACTIVE_RUN-"-i -t"}
 # within the container and use the result on your normal
 # system. And this also is a significant speedup in subsequent
 # builds because the dependencies are downloaded only once.
-docker run --rm=true $DOCKER_INTERACTIVE_RUN \
-  -v "${PWD}:${DOCKER_HOME_DIR}/hadoop${V_OPTS:-}" \
-  -w "${DOCKER_HOME_DIR}/hadoop" \
-  -v "${HOME}/.m2:${DOCKER_HOME_DIR}/.m2${V_OPTS:-}" \
-  -v "${HOME}/.gnupg:${DOCKER_HOME_DIR}/.gnupg${V_OPTS:-}" \
-  -u "${USER_ID}" \
+dockerargs=(--rm=true)
+dockerargs+=($DOCKER_INTERACTIVE_RUN)
+# bind-mount urandom over /dev/random so reads do not block on low entropy
+dockerargs+=(-v "/dev/urandom:/dev/random${V_OPTS:-}")
+# mount the current directory
+dockerargs+=(-v "${PWD}:${DOCKER_HOME_DIR}/hadoop${V_OPTS:-}")
+# mount the Maven cache
+dockerargs+=(-v "${HOME}/.m2:${DOCKER_HOME_DIR}/.m2${V_OPTS:-}")
+# mount the gnupg directory
+dockerargs+=(-v "${HOME}/.gnupg:${DOCKER_HOME_DIR}/.gnupg${V_OPTS:-}")
+# set the working directory
+dockerargs+=(-w "${DOCKER_HOME_DIR}/hadoop")
+# set the user
+dockerargs+=(-u "${USER_ID}")

+docker run "${dockerargs[@]}" \
   "hadoop-build-${USER_ID}" "$@"