17 | 17 |
18 | 18 | package org.apache.spark.deploy.yarn |
19 | 19 |
| 20 | +import java.util.{Arrays, List => JList} |
| 21 | + |
| 22 | +import org.apache.hadoop.conf.Configuration |
| 23 | +import org.apache.hadoop.fs.CommonConfigurationKeysPublic |
| 24 | +import org.apache.hadoop.net.DNSToSwitchMapping |
| 25 | +import org.apache.hadoop.yarn.api.records._ |
| 26 | +import org.apache.hadoop.yarn.client.api.AMRMClient |
| 27 | +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest |
| 28 | + |
| 29 | +import org.scalatest.{BeforeAndAfterEach, FunSuite, Matchers}
| 30 | +
| 31 | +import org.apache.spark.SecurityManager
| 32 | +import org.apache.spark.SparkConf
| 33 | +import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil._
20 | 34 | import org.apache.spark.deploy.yarn.YarnAllocator._
21 | | -import org.scalatest.FunSuite
| 35 | +import org.apache.spark.scheduler.SplitInfo
| 36 | +
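| | +// Fake topology resolver for these tests: "host3" maps to /rack2, every
| | +// other host maps to /rack1.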
| 37 | +class MockResolver extends DNSToSwitchMapping { |
| 38 | + |
| 39 | + override def resolve(names: JList[String]): JList[String] = { |
| 40 | + if (names.size > 0 && names.get(0) == "host3") Arrays.asList("/rack2") |
| 41 | + else Arrays.asList("/rack1") |
| 42 | + } |
| 43 | + |
| 44 | + override def reloadCachedMappings() {} |
| 45 | + |
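| | +  // Overload present in some Hadoop versions; not marked `override`,
| | +  // presumably so this compiles whether or not the interface declares it.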
| 46 | + def reloadCachedMappings(names: JList[String]) {} |
| 47 | +} |
| 48 | + |
| 49 | +class YarnAllocatorSuite extends FunSuite with Matchers with BeforeAndAfterEach { |
| 50 | + val conf = new Configuration() |
| 51 | + conf.setClass( |
| 52 | + CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, |
| 53 | + classOf[MockResolver], classOf[DNSToSwitchMapping]) |
| 54 | + |
| 55 | + val sparkConf = new SparkConf() |
| 56 | + sparkConf.set("spark.driver.host", "localhost") |
| 57 | + sparkConf.set("spark.driver.port", "4040") |
| 58 | + sparkConf.set("spark.yarn.jar", "notarealjar.jar") |
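| | +  // Keep the allocator from actually launching executor processes in tests.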
| 59 | + sparkConf.set("spark.yarn.launchContainers", "false") |
| 60 | + |
| 61 | + val appAttemptId = ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0) |
| 62 | + |
| 63 | + // Resource returned by YARN. YARN can give larger containers than requested, so give 6 cores |
| 64 | + // instead of the 5 requested and 3 GB instead of the 2 requested. |
| 65 | + val containerResource = Resource.newInstance(3072, 6) |
| 66 | + |
| 67 | + var rmClient: AMRMClient[ContainerRequest] = _ |
| 68 | + |
| 69 | + var containerNum = 0 |
| 70 | + |
| 71 | + override def beforeEach() { |
| 72 | + rmClient = AMRMClient.createAMRMClient() |
| 73 | + rmClient.init(conf) |
| 74 | + rmClient.start() |
| 75 | + } |
| 76 | + |
| 77 | + override def afterEach() { |
| 78 | + rmClient.stop() |
| 79 | + } |
| 80 | + |
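| | +  // A SplitInfo whose equals() is always false, so every instance is
| | +  // treated as a distinct preferred location.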
| 81 | + class MockSplitInfo(host: String) extends SplitInfo(null, host, null, 1, null) { |
| 82 | + override def equals(other: Any) = false |
| 83 | + } |
| 84 | + |
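| | +  // Creates an allocator that requests `maxExecutors` executors, each with
| | +  // 5 cores and 2048 MB of memory.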
| 85 | + def createAllocator(maxExecutors: Int = 5): YarnAllocator = { |
| 86 | + val args = Array( |
| 87 | + "--num-executors", s"$maxExecutors", |
| 88 | + "--executor-cores", "5", |
| 89 | + "--executor-memory", "2048", |
| 90 | + "--jar", "somejar.jar", |
| 91 | + "--class", "SomeClass") |
| 92 | + new YarnAllocator( |
| 93 | + conf, |
| 94 | + sparkConf, |
| 95 | + rmClient, |
| 96 | + appAttemptId, |
| 97 | + new ApplicationMasterArguments(args), |
| 98 | + new SecurityManager(sparkConf)) |
| 99 | + } |
| 100 | + |
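| | +  // Creates a container on `host` with a unique id and the fixed resource.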
| 101 | + def createContainer(host: String): Container = { |
| 102 | + val containerId = ContainerId.newInstance(appAttemptId, containerNum) |
| 103 | + containerNum += 1 |
| 104 | + val nodeId = NodeId.newInstance(host, 1000) |
| 105 | + Container.newInstance(containerId, nodeId, "", containerResource, RM_REQUEST_PRIORITY, null) |
| 106 | + } |
| 107 | + |
| 108 | + test("single container allocated") { |
| 109 | + // request a single container and receive it |
| 110 | + val handler = createAllocator() |
| 111 | + handler.addResourceRequests(1) |
| 112 | + handler.getNumExecutorsRunning should be (0) |
| 113 | + handler.getNumPendingAllocate should be (1) |
| 114 | + |
| 115 | + val container = createContainer("host1") |
| 116 | + handler.handleAllocatedContainers(Array(container)) |
| 117 | + |
| 118 | + handler.getNumExecutorsRunning should be (1) |
| 119 | + handler.allocatedContainerToHostMap.get(container.getId).get should be ("host1") |
| 120 | + handler.allocatedHostToContainersMap.get("host1").get should contain (container.getId) |
| 121 | +    val size = rmClient.getMatchingRequests(container.getPriority, "host1", containerResource).size
| | +    size should be (0)
| 122 | + } |
| 123 | + |
| 124 | + test("some containers allocated") { |
| 125 | + // request a few containers and receive some of them |
| 126 | + val handler = createAllocator() |
| 127 | + handler.addResourceRequests(4) |
| 128 | + handler.getNumExecutorsRunning should be (0) |
| 129 | + handler.getNumPendingAllocate should be (4) |
| 130 | + |
| 131 | + val container1 = createContainer("host1") |
| 132 | + val container2 = createContainer("host1") |
| 133 | + val container3 = createContainer("host2") |
| 134 | + handler.handleAllocatedContainers(Array(container1, container2, container3)) |
| 135 | + |
| 136 | + handler.getNumExecutorsRunning should be (3) |
| 137 | + handler.allocatedContainerToHostMap.get(container1.getId).get should be ("host1") |
| 138 | + handler.allocatedContainerToHostMap.get(container2.getId).get should be ("host1") |
| 139 | + handler.allocatedContainerToHostMap.get(container3.getId).get should be ("host2") |
| 140 | + handler.allocatedHostToContainersMap.get("host1").get should contain (container1.getId) |
| 141 | + handler.allocatedHostToContainersMap.get("host1").get should contain (container2.getId) |
| 142 | + handler.allocatedHostToContainersMap.get("host2").get should contain (container3.getId) |
| 143 | + } |
| 144 | + |
| 145 | + test("receive more containers than requested") { |
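| | +    // request two containers but receive three; the surplus must not be used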
| 146 | + val handler = createAllocator(2) |
| 147 | + handler.addResourceRequests(2) |
| 148 | + handler.getNumExecutorsRunning should be (0) |
| 149 | + handler.getNumPendingAllocate should be (2) |
| 150 | + |
| 151 | + val container1 = createContainer("host1") |
| 152 | + val container2 = createContainer("host2") |
| 153 | + val container3 = createContainer("host4") |
| 154 | + handler.handleAllocatedContainers(Array(container1, container2, container3)) |
| 155 | + |
| 156 | + handler.getNumExecutorsRunning should be (2) |
| 157 | + handler.allocatedContainerToHostMap.get(container1.getId).get should be ("host1") |
| 158 | + handler.allocatedContainerToHostMap.get(container2.getId).get should be ("host2") |
| 159 | + handler.allocatedContainerToHostMap.contains(container3.getId) should be (false) |
| 160 | + handler.allocatedHostToContainersMap.get("host1").get should contain (container1.getId) |
| 161 | + handler.allocatedHostToContainersMap.get("host2").get should contain (container2.getId) |
| 162 | + handler.allocatedHostToContainersMap.contains("host4") should be (false) |
| 163 | + } |
22 | 164 |
23 | | -class YarnAllocatorSuite extends FunSuite { |
24 | 165 | test("memory exceeded diagnostic regexes") { |
25 | 166 | val diagnostics = |
26 | 167 | "Container [pid=12465,containerID=container_1412887393566_0003_01_000002] is running " + |
27 | | - "beyond physical memory limits. Current usage: 2.1 MB of 2 GB physical memory used; " + |
28 | | - "5.8 GB of 4.2 GB virtual memory used. Killing container." |
| 168 | + "beyond physical memory limits. Current usage: 2.1 MB of 2 GB physical memory used; " + |
| 169 | + "5.8 GB of 4.2 GB virtual memory used. Killing container." |
29 | 170 | val vmemMsg = memLimitExceededLogMessage(diagnostics, VMEM_EXCEEDED_PATTERN) |
30 | 171 | val pmemMsg = memLimitExceededLogMessage(diagnostics, PMEM_EXCEEDED_PATTERN) |
31 | 172 | assert(vmemMsg.contains("5.8 GB of 4.2 GB virtual memory used.")) |
32 | 173 | assert(pmemMsg.contains("2.1 MB of 2 GB physical memory used.")) |
33 | 174 | } |
| 175 | + |
34 | 176 | } |