@@ -18,7 +18,7 @@
 
 set -e
 function usage {
-  echo "Usage: ${0} [options] /path/to/component/bin-install /path/to/hadoop/executable /path/to/hadoop/hadoop-yarn-server-tests-tests.jar /path/to/hadoop/hadoop-mapreduce-client-jobclient-tests.jar /path/to/mapred/executable"
+  echo "Usage: ${0} [options] /path/to/component/bin-install /path/to/hadoop/executable /path/to/share/hadoop/yarn/timelineservice /path/to/hadoop/hadoop-yarn-server-tests-tests.jar /path/to/hadoop/hadoop-mapreduce-client-jobclient-tests.jar /path/to/mapred/executable"
   echo ""
   echo "    --zookeeper-data /path/to/use                                     Where the embedded zookeeper instance should write its data."
   echo "                                                                      defaults to 'zk-data' in the working-dir."
@@ -67,9 +67,10 @@ if [ $# -lt 5 ]; then
 fi
 component_install="$(cd "$(dirname "$1")"; pwd)/$(basename "$1")"
 hadoop_exec="$(cd "$(dirname "$2")"; pwd)/$(basename "$2")"
-yarn_server_tests_test_jar="$(cd "$(dirname "$3")"; pwd)/$(basename "$3")"
-mapred_jobclient_test_jar="$(cd "$(dirname "$4")"; pwd)/$(basename "$4")"
-mapred_exec="$(cd "$(dirname "$5")"; pwd)/$(basename "$5")"
+timeline_service_dir="$(cd "$(dirname "$3")"; pwd)/$(basename "$3")"
+yarn_server_tests_test_jar="$(cd "$(dirname "$4")"; pwd)/$(basename "$4")"
+mapred_jobclient_test_jar="$(cd "$(dirname "$5")"; pwd)/$(basename "$5")"
+mapred_exec="$(cd "$(dirname "$6")"; pwd)/$(basename "$6")"
 
 if [ ! -x "${hadoop_exec}" ]; then
   echo "hadoop cli does not appear to be executable." >&2
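The new `timeline_service_dir` argument is normalized the same way as the existing ones: the `cd`/`dirname`/`pwd` idiom converts a possibly-relative path into an absolute one before the script later changes directories. A minimal sketch of the idiom in isolation, using a hypothetical relative path (the parent directory must exist, though the final component need not):

    # Resolve a possibly-relative path into an absolute one.
    p="build/libs/example.jar"                                # hypothetical input
    abs="$(cd "$(dirname "${p}")"; pwd)/$(basename "${p}")"
    echo "${abs}"   # e.g. /home/user/project/build/libs/example.jar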
@@ -285,18 +286,25 @@ echo "Starting up Hadoop"
 if [ "${hadoop_version%.*.*}" -gt 2 ]; then
   "${mapred_exec}" minicluster -format -writeConfig "${working_dir}/hbase-conf/core-site.xml" -writeDetails "${working_dir}/hadoop_cluster_info.json" >"${working_dir}/hadoop_cluster_command.out" 2>"${working_dir}/hadoop_cluster_command.err" &
 else
-  HADOOP_CLASSPATH="${yarn_server_tests_test_jar}" "${hadoop_exec}" jar "${mapred_jobclient_test_jar}" minicluster -format -writeConfig "${working_dir}/hbase-conf/core-site.xml" -writeDetails "${working_dir}/hadoop_cluster_info.json" >"${working_dir}/hadoop_cluster_command.out" 2>"${working_dir}/hadoop_cluster_command.err" &
+  HADOOP_CLASSPATH="${timeline_service_dir}/*:${timeline_service_dir}/lib/*:${yarn_server_tests_test_jar}" "${hadoop_exec}" jar "${mapred_jobclient_test_jar}" minicluster -format -writeConfig "${working_dir}/hbase-conf/core-site.xml" -writeDetails "${working_dir}/hadoop_cluster_info.json" >"${working_dir}/hadoop_cluster_command.out" 2>"${working_dir}/hadoop_cluster_command.err" &
 fi
 
 echo "$!" > "${working_dir}/hadoop.pid"
 
+# 2 + 4 + 8 + .. + 256 ~= 8.5 minutes.
+max_sleep_time=512
 sleep_time=2
-until [ -s "${working_dir}/hbase-conf/core-site.xml" ]; do
+until [[ -s "${working_dir}/hbase-conf/core-site.xml" || "${sleep_time}" -ge "${max_sleep_time}" ]]; do
   printf '\twaiting for Hadoop to finish starting up.\n'
   sleep "${sleep_time}"
   sleep_time="$((sleep_time*2))"
 done
 
+if [ "${sleep_time}" -ge "${max_sleep_time}" ] ; then
+  echo "time out waiting for Hadoop to startup" >&2
+  exit 1
+fi
+
 if [ "${hadoop_version%.*.*}" -gt 2 ]; then
   echo "Verifying configs"
   "${hadoop_exec}" --config "${working_dir}/hbase-conf/" conftest
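For Hadoop 2, where the minicluster is started via `hadoop jar`, the new classpath prefix pulls in the timeline service jars and their bundled dependencies. `HADOOP_CLASSPATH` is a colon-separated list, and a trailing `/*` is a JVM classpath wildcard that matches every jar in that directory. A sketch of roughly what the assembled value could expand to, assuming a hypothetical install under `/opt/hadoop` whose timeline service directory is `share/hadoop/yarn/timelineservice`:

    # Hypothetical expansion of the HADOOP_CLASSPATH assignment above:
    HADOOP_CLASSPATH="/opt/hadoop/share/hadoop/yarn/timelineservice/*"         # timeline service jars
    HADOOP_CLASSPATH+=":/opt/hadoop/share/hadoop/yarn/timelineservice/lib/*"   # their dependencies
    HADOOP_CLASSPATH+=":/opt/hadoop/hadoop-yarn-server-tests-tests.jar"        # minicluster test classes
    export HADOOP_CLASSPATH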
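The startup wait is a capped exponential backoff: sleeps of 2, 4, ..., 256 seconds total 510 seconds (the "~8.5 minutes" in the comment), and the loop exits either when `core-site.xml` becomes non-empty or when `sleep_time` doubles to 512. The same pattern as a standalone sketch, with a hypothetical readiness probe standing in for the `core-site.xml` test:

    #!/usr/bin/env bash
    # Capped exponential backoff around a hypothetical readiness check.
    is_ready() { [ -s "/tmp/example-ready.flag" ]; }   # stand-in probe
    max_sleep_time=512
    sleep_time=2
    until is_ready || [ "${sleep_time}" -ge "${max_sleep_time}" ]; do
      sleep "${sleep_time}"
      sleep_time="$((sleep_time*2))"
    done
    if ! is_ready; then
      echo "timed out waiting for readiness" >&2
      exit 1
    fi

One design note: re-testing the condition itself after the loop, rather than comparing the counter as the diff does, avoids reporting a false timeout in the edge case where the condition becomes true on the final iteration.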